blob: 9050fc64a649e0dc303c5cfbfa1ddba071529058 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00007#include "WorkloadTestUtils.hpp"
8
telsoa014fcda012018-03-09 14:13:49 +00009#include "QuantizeHelper.hpp"
10
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000011#include <armnn/ArmNN.hpp>
12
13#include <Permute.hpp>
14
15#include <backendsCommon/CpuTensorHandle.hpp>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000016#include <backendsCommon/IBackendInternal.hpp>
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000017#include <backendsCommon/WorkloadFactory.hpp>
18#include <backendsCommon/WorkloadInfo.hpp>
19
20#include <test/TensorHelpers.hpp>
21
James Conroy45a9b772018-10-31 11:47:53 +000022#include <boost/numeric/conversion/cast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000023
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +000024#include <algorithm>
25#include <string>
26
telsoa014fcda012018-03-09 14:13:49 +000027template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +000028LayerTestResult<T, 4> SimplePooling2dTestImpl(
29 armnn::IWorkloadFactory& workloadFactory,
30 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
31 armnn::Pooling2dDescriptor descriptor,
32 float qScale,
33 int32_t qOffset,
34 const boost::multi_array<T, 4>& input,
35 const boost::multi_array<T, 4>& outputExpected)
telsoa014fcda012018-03-09 14:13:49 +000036{
Matthew Bentham8800c002018-11-19 13:19:28 +000037 const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
38 const armnn::DataLayoutIndexed dimensionIndices = dataLayout;
39 auto heightIndex = dimensionIndices.GetHeightIndex();
40 auto widthIndex = dimensionIndices.GetWidthIndex();
41 auto channelsIndex = dimensionIndices.GetChannelsIndex();
telsoa014fcda012018-03-09 14:13:49 +000042
James Conroy69482272018-10-19 10:41:35 +010043 unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
44 unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
45 unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
46 unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
47
48 unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
49 unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
50 unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
telsoa014fcda012018-03-09 14:13:49 +000051 unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
52
James Conroy45a9b772018-10-31 11:47:53 +000053 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
54 inputWidth, dataLayout);
55 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
56 outputWidth, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +000057
58 // Set quantization parameters if the requested type is a quantized type.
59 if(armnn::IsQuantizedType<T>())
60 {
61 inputTensorInfo.SetQuantizationScale(qScale);
62 inputTensorInfo.SetQuantizationOffset(qOffset);
63 outputTensorInfo.SetQuantizationScale(qScale);
64 outputTensorInfo.SetQuantizationOffset(qOffset);
65 }
66
67 LayerTestResult<T, 4> result(outputTensorInfo);
68
69 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
70 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
71
72 armnn::Pooling2dQueueDescriptor queueDescriptor;
73 queueDescriptor.m_Parameters = descriptor;
James Conroy45a9b772018-10-31 11:47:53 +000074 queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
Francis Murtagh043d0d02018-10-05 14:08:48 +010075
76 armnn::WorkloadInfo workloadInfo;
77 AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
78 AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
79
80 // Don't execute if Pooling is not supported, as an exception will be raised.
David Beck79141b92018-10-23 16:09:36 +010081 armnn::BackendId backend = workloadFactory.GetBackendId();
Francis Murtagh043d0d02018-10-05 14:08:48 +010082 const size_t reasonIfUnsupportedMaxLen = 255;
83 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
David Beck79141b92018-10-23 16:09:36 +010084 result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
Francis Murtagh043d0d02018-10-05 14:08:48 +010085 queueDescriptor.m_Parameters,
86 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
87 if (!result.supported)
88 {
89 return result;
90 }
91
92 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
93
94 inputHandle->Allocate();
95 outputHandle->Allocate();
96
97 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
98
99 workload->Execute();
100
101 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
102
103 result.outputExpected = outputExpected;
104
105 return result;
106}
107
telsoa014fcda012018-03-09 14:13:49 +0000108//
109// Tests max pooling with the following parameters:
110//
111// Pooling size: 3x3
112// Stride: (2,4)
113// input size: 8x13
114// channels: 2
115// batch size: 2
116//
117template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000118LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
119 armnn::IWorkloadFactory& workloadFactory,
120 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
121 bool forceNoPadding,
122 float qScale = 1.0f,
123 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000124{
125 armnn::Pooling2dDescriptor descriptor;
126 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
127 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
128 descriptor.m_StrideX = 2;
129 descriptor.m_StrideY = 4;
130 // forceNoPadding is mainly used for compatibility with ARM Compute.
131 // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
132 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
133 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
134 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
135 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
136
137 unsigned int inputWidth = 8;
138 unsigned int inputHeight = 13;
139 unsigned int outputWidth =
140 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
141 descriptor.m_StrideX;
142 unsigned int outputHeight =
143 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
144 descriptor.m_StrideY;
145 unsigned int channels = 2;
146 unsigned int batchSize = 2;
147
148 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
149 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
150
151 // Set quantization parameters if the requested type is a quantized type.
152 if(armnn::IsQuantizedType<T>())
153 {
154 inputTensorInfo.SetQuantizationScale(qScale);
155 inputTensorInfo.SetQuantizationOffset(qOffset);
156 outputTensorInfo.SetQuantizationScale(qScale);
157 outputTensorInfo.SetQuantizationOffset(qOffset);
158 }
159
160 std::vector<float> singleChannelData({
161 0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
162 1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
163 8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
164 8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
165 5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
166 1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
167 9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
168 1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
169 6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
170 8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
171 7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
172 4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
173 3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
174 });
175
telsoa01c577f2c2018-08-31 09:22:23 +0100176 // Constructs input data.
telsoa014fcda012018-03-09 14:13:49 +0000177 std::vector<float> inputData;
178 auto negator = [](float f) { return -f; };
179
telsoa01c577f2c2018-08-31 09:22:23 +0100180 // First image (two channels where the second channel is the negative of the first one).
telsoa014fcda012018-03-09 14:13:49 +0000181 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
182 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
183
telsoa01c577f2c2018-08-31 09:22:23 +0100184 // Second image (same as first image).
telsoa014fcda012018-03-09 14:13:49 +0000185 inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
186 std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
187
188 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
189
telsoa01c577f2c2018-08-31 09:22:23 +0100190 // These were calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000191 auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
192 boost::multi_array<T, 4> outputExpected(shape);
193 if (forceNoPadding)
194 {
195 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
196 QuantizedVector<T>(qScale, qOffset, {
197 8.0f, 8.0f, 8.0f,
198 9.0f, 7.0f, 9.0f,
199 9.0f, 9.0f, 9.0f,
200
201 0.0f, 0.0f, -3.0f,
202 -1.0f, 0.0f, 0.0f,
203 -1.0f, -1.0f, -1.0f,
204
205 8.0f, 8.0f, 8.0f,
206 9.0f, 7.0f, 9.0f,
207 9.0f, 9.0f, 9.0f,
208
209 0.0f, 0.0f, -3.0f,
210 -1.0f, 0.0f, 0.0f,
211 -1.0f, -1.0f, -1.0f
212 }));
213 }
214 else
215 {
216 outputExpected = MakeTensor<T, 4>(outputTensorInfo,
217 QuantizedVector<T>(qScale, qOffset, {
218 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
219 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
220 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
221
222 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
223 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
224 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,
225
226 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
227 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
228 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
229
230 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
231 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
232 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
233 }));
234 }
235
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000236 return SimplePooling2dTestImpl<T>(
237 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000238}
239
240template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000241LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
242 armnn::IWorkloadFactory& workloadFactory,
243 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000244 const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000245 float qScale = 1.0f,
246 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000247{
248 armnn::Pooling2dDescriptor descriptor;
James Conroy45a9b772018-10-31 11:47:53 +0000249 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
telsoa014fcda012018-03-09 14:13:49 +0000250 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
251 descriptor.m_StrideX = descriptor.m_StrideY = 2;
telsoa014fcda012018-03-09 14:13:49 +0000252 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
James Conroy69482272018-10-19 10:41:35 +0100253 descriptor.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +0000254
James Conroy45a9b772018-10-31 11:47:53 +0000255 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
256 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
telsoa014fcda012018-03-09 14:13:49 +0000257
258 // Set quantization parameters if the requested type is a quantized type.
259 if(armnn::IsQuantizedType<T>())
260 {
261 inputTensorInfo.SetQuantizationScale(qScale);
262 inputTensorInfo.SetQuantizationOffset(qOffset);
263 outputTensorInfo.SetQuantizationScale(qScale);
264 outputTensorInfo.SetQuantizationOffset(qOffset);
265 }
266
James Conroy45a9b772018-10-31 11:47:53 +0000267 std::vector<T> inputData(
telsoa014fcda012018-03-09 14:13:49 +0000268 QuantizedVector<T>(qScale, qOffset, {
James Conroy45a9b772018-10-31 11:47:53 +0000269 1.0f, 2.0f, 5.0f, 6.0f,
270 3.0f, 4.0f, 7.0f, 8.0f,
271 9.0f, 10.0f, 13.0f, 14.0f,
272 11.0f, 12.0f, 15.0f, 16.0f,
273
274 17.0f, 18.0f, 21.0f, 22.0f,
275 19.0f, 20.0f, 23.0f, 24.0f,
276 25.0f, 26.0f, 29.0f, 30.0f,
277 27.0f, 28.0f, 31.0f, 32.0f,
telsoa014fcda012018-03-09 14:13:49 +0000278 }));
279
James Conroy45a9b772018-10-31 11:47:53 +0000280 std::vector<T> outputData(
telsoa014fcda012018-03-09 14:13:49 +0000281 QuantizedVector<T>(qScale, qOffset, {
James Conroy45a9b772018-10-31 11:47:53 +0000282 4.0f, 8.0f,
283 12.0f, 16.0f,
284
285 20.0f, 24.0f,
286 28.0f, 32.0f,
telsoa014fcda012018-03-09 14:13:49 +0000287 }));
288
James Conroy45a9b772018-10-31 11:47:53 +0000289 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +0000290 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy45a9b772018-10-31 11:47:53 +0000291 {
292 std::vector<T> tmp(inputData.size());
293 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
294 inputData = tmp;
295
296 std::vector<T> tmp1(outputData.size());
297 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
298 outputData = tmp1;
299 }
300
301 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
302
303 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
304
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000305 return SimplePooling2dTestImpl<T>(
306 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000307}
308
309template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000310LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
311 armnn::IWorkloadFactory& workloadFactory,
312 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000313 armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000314 float qScale = 1.0f,
315 int32_t qOffset = 0)
Francis Murtagh043d0d02018-10-05 14:08:48 +0100316{
James Conroy45a9b772018-10-31 11:47:53 +0000317 armnn::Pooling2dDescriptor descriptor;
318 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
319 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
320 descriptor.m_StrideX = descriptor.m_StrideY = 2;
321 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
322 descriptor.m_DataLayout = dataLayout;
Francis Murtagh043d0d02018-10-05 14:08:48 +0100323
James Conroy45a9b772018-10-31 11:47:53 +0000324 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
325 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
Francis Murtagh043d0d02018-10-05 14:08:48 +0100326
James Conroy45a9b772018-10-31 11:47:53 +0000327 // Set quantization parameters if the requested type is a quantized type.
328 if(armnn::IsQuantizedType<T>())
329 {
330 inputTensorInfo.SetQuantizationScale(qScale);
331 inputTensorInfo.SetQuantizationOffset(qOffset);
332 outputTensorInfo.SetQuantizationScale(qScale);
333 outputTensorInfo.SetQuantizationOffset(qOffset);
334 }
Francis Murtagh043d0d02018-10-05 14:08:48 +0100335
James Conroy45a9b772018-10-31 11:47:53 +0000336 std::vector<T> inputData(
337 QuantizedVector<T>(qScale, qOffset, {
338 2.0f, 2.0f, 6.0f, 6.0f,
339 4.0f, 4.0f, 8.0f, 8.0f,
340 10.0f, 12.0f, 14.0f, 16.0f,
341 10.0f, 12.0f, 16.0f, 14.0f,
342
343 18.0f, 20.0f, 24.0f, 22.0f,
344 20.0f, 18.0f, 22.0f, 24.0f,
345 26.0f, 28.0f, 0.0f, 0.0f,
346 26.0f, 28.0f, 0.0f, 0.0f,
347 }));
348
349 std::vector<T> outputData(
350 QuantizedVector<T>(qScale, qOffset, {
351 3.0f, 7.0f,
352 11.0f, 15.0f,
353
354 19.0f, 23.0f,
355 27.0f, 0.0f,
356 }));
357
358 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +0000359 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy45a9b772018-10-31 11:47:53 +0000360 {
361 std::vector<T> tmp(inputData.size());
362 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
363 inputData = tmp;
364
365 std::vector<T> tmp1(outputData.size());
366 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
367 outputData = tmp1;
368 }
369
370 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
371
372 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
373
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000374 return SimplePooling2dTestImpl<T>(
375 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
Francis Murtagh043d0d02018-10-05 14:08:48 +0100376}
377
378template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000379LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
380 armnn::IWorkloadFactory& workloadFactory,
381 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
382 float qScale = 1.0f,
383 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000384{
385 armnn::Pooling2dDescriptor descriptor;
386 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
387 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
388 descriptor.m_StrideX = descriptor.m_StrideY = 5;
389 descriptor.m_PadLeft = 50;
390 descriptor.m_PadRight = 50;
391 descriptor.m_PadTop = 50;
392 descriptor.m_PadBottom = 50;
393 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
394
395 armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
396 armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());
397
398 // Set quantization parameters if the requested type is a quantized type.
399 if(armnn::IsQuantizedType<T>())
400 {
401 inputTensorInfo.SetQuantizationScale(qScale);
402 inputTensorInfo.SetQuantizationOffset(qOffset);
403 outputTensorInfo.SetQuantizationScale(qScale);
404 outputTensorInfo.SetQuantizationOffset(qOffset);
405 }
406
407 std::vector<T> inputVec;
408
409 for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
410 {
411 inputVec.push_back(1);
412 }
413
414 auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
415
416 std::vector<T> outputVec;
417
418 for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
419 {
420 outputVec.push_back(1);
421 }
422
423 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
424
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000425 return SimplePooling2dTestImpl<T>(
426 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000427}
428
429template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000430LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
431 armnn::IWorkloadFactory& workloadFactory,
432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Matthew Bentham8800c002018-11-19 13:19:28 +0000433 armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000434 float qScale = 1.0f,
435 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000436{
437 armnn::Pooling2dDescriptor descriptor;
438 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
439 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
440 descriptor.m_StrideX = descriptor.m_StrideY = 2;
441 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
James Conroy45a9b772018-10-31 11:47:53 +0000442 descriptor.m_DataLayout = dataLayout;
telsoa014fcda012018-03-09 14:13:49 +0000443
James Conroy45a9b772018-10-31 11:47:53 +0000444 armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
445 armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
446
447 std::vector<T> inputData(
telsoa014fcda012018-03-09 14:13:49 +0000448 QuantizedVector<T>(qScale, qOffset, {
James Conroy45a9b772018-10-31 11:47:53 +0000449 1.0f, 7.0f, 5.0f, 5.0f,
450 1.0f, 7.0f, 5.0f, 5.0f,
451 3.0f, 3.0f, 1.0f, 1.0f,
452 3.0f, 3.0f, 1.0f, 1.0f,
453
454 1.0f, 7.0f, 0.0f, 0.0f,
455 1.0f, 7.0f, 2.0f, 0.0f,
456 0.0f, 2.0f, 1.0f, 1.0f,
457 0.0f, 0.0f, 1.0f, 1.0f,
telsoa014fcda012018-03-09 14:13:49 +0000458 }));
459
James Conroy45a9b772018-10-31 11:47:53 +0000460 std::vector<T> outputData(
telsoa014fcda012018-03-09 14:13:49 +0000461 QuantizedVector<T>(qScale, qOffset, {
462 5.0f, 5.0f,
James Conroy45a9b772018-10-31 11:47:53 +0000463 3.0f, 1.0f,
464
465 5.0f, 1.0f,
466 1.0f, 1.0f,
telsoa014fcda012018-03-09 14:13:49 +0000467 }));
468
James Conroy45a9b772018-10-31 11:47:53 +0000469 const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
Matthew Bentham8800c002018-11-19 13:19:28 +0000470 if (dataLayout == armnn::DataLayout::NHWC)
James Conroy45a9b772018-10-31 11:47:53 +0000471 {
472 std::vector<T> tmp(inputData.size());
473 armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
474 inputData = tmp;
475
476 std::vector<T> tmp1(outputData.size());
477 armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
478 outputData = tmp1;
479 }
480
481 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
482
483 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
484
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000485 return SimplePooling2dTestImpl<T>(
486 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000487}
488
489template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000490LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
491 armnn::IWorkloadFactory& workloadFactory,
492 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
493 float qScale = 1.0f,
494 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000495{
496 armnn::Pooling2dDescriptor descriptor;
497 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
498 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
499 descriptor.m_StrideX = descriptor.m_StrideY = 1;
500 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
501
502 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
503 auto input = MakeTensor<T, 4>(inputTensorInfo,
504 QuantizedVector<T>(qScale, qOffset, {
505 2.0f, 1.0f, 5.0f, 2.0f,
506 1.0f, 2.0f, 2.0f, 1.0f,
507 5.0f, 4.0f, 1.0f, 5.0f,
508 2.0f, 1.0f, 5.0f, 2.0f,
509 }));
510
511 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
512 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
513 QuantizedVector<T>(qScale, qOffset, {
514 3.0f, 3.0f,
515 3.0f, 3.0f,
516 }));
517
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000518 return SimplePooling2dTestImpl<T>(
519 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000520}
521
522template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000523LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
524 armnn::IWorkloadFactory& workloadFactory,
525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
526 float qScale = 1.0f,
527 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000528{
529 armnn::Pooling2dDescriptor descriptor;
530 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
531 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
532 descriptor.m_StrideX = descriptor.m_StrideY = 3;
533 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
534
535 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
536 auto input = MakeTensor<T, 4>(inputTensorInfo,
537 QuantizedVector<T>(qScale, qOffset, {
538 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
539 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
540 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
541 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
542 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
543 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
544 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
545 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
546 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
547 }));
548
549 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
550 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
551 QuantizedVector<T>(qScale, qOffset, {
552 3.0f, 3.0f, 3.0f,
553 3.0f, 3.0f, 3.0f,
554 3.0f, 3.0f, 3.0f,
555 }));
556
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000557 return SimplePooling2dTestImpl<T>(
558 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000559}
560
561template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000562LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
563 armnn::IWorkloadFactory& workloadFactory,
564 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
565 float qScale = 1.0f,
566 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000567{
568 armnn::Pooling2dDescriptor descriptor;
569 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
570 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
571 descriptor.m_StrideX = descriptor.m_StrideY = 4;
572 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
573
574 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
575 auto input = MakeTensor<T, 4>(inputTensorInfo,
576 QuantizedVector<T>(qScale, qOffset, {
577 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
578 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
579 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
580 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
581 2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
582 1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
583 5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
584 }));
585
586 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
587 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
588 QuantizedVector<T>(qScale, qOffset, {
589 3.0f, 3.0f,
590 3.0f, 3.0f,
591 }));
592
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000593 return SimplePooling2dTestImpl<T>(
594 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000595}
596
597template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000598LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
599 armnn::IWorkloadFactory& workloadFactory,
600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
601 float qScale = 1.0f,
602 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000603{
604 armnn::Pooling2dDescriptor descriptor;
605 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
606 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
607 descriptor.m_StrideX = descriptor.m_StrideY = 7;
608 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
609
610 armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
611 auto input = MakeTensor<T, 4>(inputTensorInfo,
612 QuantizedVector<T>(qScale, qOffset, {
613 1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
614 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
615 0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
616 8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
617 0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
618 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
619 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
620 }));
621
622 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
623 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
624 QuantizedVector<T>(qScale, qOffset, {
625 3.0f,
626 }));
627
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000628 return SimplePooling2dTestImpl<T>(
629 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000630}
631
632template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000633LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
634 armnn::IWorkloadFactory& workloadFactory,
635 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
636 float qScale = 1.0f,
637 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000638{
639 armnn::Pooling2dDescriptor descriptor;
640 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
641 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
642 descriptor.m_StrideX = descriptor.m_StrideY = 9;
643 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
644
645 armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
646 auto input = MakeTensor<T, 4>(inputTensorInfo,
647 QuantizedVector<T>(qScale, qOffset, {
648 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
649 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
650 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
651 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
652 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
653 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
654 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
655 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
656 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
657 }));
658
659 armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
660 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
661 QuantizedVector<T>(qScale, qOffset, {
662 3.0f,
663 }));
664
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000665 return SimplePooling2dTestImpl<T>(
666 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000667}
668
669template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000670LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
671 armnn::IWorkloadFactory& workloadFactory,
672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
673 float qScale = 1.0f,
674 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000675{
676 armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
677 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
678
679 armnn::Pooling2dDescriptor descriptor;
680 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
681 descriptor.m_PoolWidth = 2;
682 descriptor.m_PoolHeight = 3;
683 descriptor.m_StrideX = 2;
684 descriptor.m_StrideY = 1;
685 descriptor.m_PadLeft = 2;
686 descriptor.m_PadRight = 0;
687 descriptor.m_PadTop = 1;
688 descriptor.m_PadBottom = 2;
689 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
690 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
691
telsoa01c577f2c2018-08-31 09:22:23 +0100692 // Construct input data.
telsoa014fcda012018-03-09 14:13:49 +0000693 auto input = MakeTensor<T, 4>(inputTensorInfo,
694 QuantizedVector<T>(qScale, qOffset, {
695 1.0f, 3.0f, 4.0f,
696 }));
697
telsoa01c577f2c2018-08-31 09:22:23 +0100698 // These were calculated manually.
telsoa014fcda012018-03-09 14:13:49 +0000699 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
700 QuantizedVector<T>(qScale, qOffset, {
701 0.0f, 3.0f, 0.0f, 3.0f,
702 }));
703
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000704 return SimplePooling2dTestImpl<T>(
705 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000706}
707
template<typename T>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    // Runs an identical pooling workload on two factories - the backend under
    // test and a reference backend - over the same random input, and returns
    // both results so the caller can compare them.
    // Note: 'memoryManager' is accepted for signature consistency with the
    // other test helpers but is not used in this function.
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    // Output extents follow the Floor output-shape rounding rule set below.
    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    // NCHW layout: { batch, channels, height, width }.
    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Fixed seed so both runs (and repeated test executions) see the same data.
    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    // The reference workload shares the queue descriptor but gets its own
    // tensor handles, rebound via SetWorkloadInput/Output.
    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    // Allocate all handles before copying data in; both workloads read the
    // same input values.
    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    // 'output' holds the backend-under-test result, 'outputExpected' the
    // reference backend's result.
    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}
810
811//
812// Tests max pooling with the following parameters:
813//
814// Pooling size: 2x2
815// Stride: (2,2)
816// input size: 4x4
817// channels: 1
818// batch size: 1
819//
820template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000821LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
822 armnn::IWorkloadFactory& workloadFactory,
823 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
824 bool forceNoPadding,
825 float qScale = 1.0f,
826 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000827{
828 armnn::Pooling2dDescriptor descriptor;
829 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
830 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
831 descriptor.m_StrideX = 2;
832 descriptor.m_StrideY = 2;
833 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
834 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
835 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
836 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
837
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000838
telsoa014fcda012018-03-09 14:13:49 +0000839 unsigned int inputWidth = 4;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000840
telsoa014fcda012018-03-09 14:13:49 +0000841 unsigned int inputHeight = 4;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000842
telsoa014fcda012018-03-09 14:13:49 +0000843 unsigned int outputWidth =
844 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
845 descriptor.m_StrideX;
846 unsigned int outputHeight =
847 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
848 descriptor.m_StrideY;
849 unsigned int channels = 1;
850 unsigned int batchSize = 1;
851
852 std::vector<float> inputData = {
853 510.0f, 222.0f, 780.0f, 654.0f,
854 141.0f, 276.0f, 15.0f, 546.0f,
855 303.0f, 618.0f, 582.0f, 339.0f,
856 438.0f, 564.0f, 573.0f, 402.0f
857 };
858
telsoa01c577f2c2018-08-31 09:22:23 +0100859 // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
telsoa014fcda012018-03-09 14:13:49 +0000860 std::vector<float> expectedOutputDataWithPadding = {
861 0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
862 0.0f, 438.0f, 618.0f, 402.0f, 0.0f
863 };
864
865 std::vector<float> expectedOutputDataNoPadding = {
866 510.0f, 780.0f,
867 618.0f, 582.0f
868 };
869
870 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
871
872 // Scale and offset should match input - we're just calculating maximum values.
873 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
874
875 // Set quantization parameters if the requested type is a quantized type.
876 if(armnn::IsQuantizedType<T>())
877 {
878 inputTensorInfo.SetQuantizationScale(qScale);
879 inputTensorInfo.SetQuantizationOffset(qOffset);
880 outputTensorInfo.SetQuantizationScale(qScale);
881 outputTensorInfo.SetQuantizationOffset(qOffset);
882 }
883
884 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
885
886 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
887 forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
888 QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
889
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000890 return SimplePooling2dTestImpl<T>(
891 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +0000892}
893
surmeh01bceff2f2018-03-29 16:29:27 +0100894//
895// Tests max pooling with the following parameters:
896//
897// Pooling size: 3x2
898// Stride: (2,2)
899// input size: 3x2
900// channels: 1
901// batch size: 1
902//
903template<typename T>
904LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
905 armnn::IWorkloadFactory& workloadFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000906 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
surmeh01bceff2f2018-03-29 16:29:27 +0100907 bool forceNoPadding,
908 float qScale = 1.0f,
909 int32_t qOffset = 0)
910{
911 armnn::Pooling2dDescriptor descriptor;
912 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
913 descriptor.m_PoolWidth = 3;
914 descriptor.m_PoolHeight = 2;
915 descriptor.m_StrideX = 2;
916 descriptor.m_StrideY = 2;
917 descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
918 descriptor.m_PadRight = descriptor.m_PadLeft;
919 descriptor.m_PadTop = 0;
920 descriptor.m_PadBottom = 0;
921 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
922 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
923
924 unsigned int inputWidth = 3;
925 unsigned int inputHeight = 2;
926 unsigned int outputWidth =
927 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
928 descriptor.m_StrideX;
929 unsigned int outputHeight =
930 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
931 descriptor.m_StrideY;
932 unsigned int channels = 1;
933 unsigned int batchSize = 1;
934
935 std::vector<float> inputData = {
936 3.0f, 6.0f, 9.0f,
937 12.0f, 15.0f, 18.0f,
938 };
939
940 std::vector<float> expectedOutputDataWithPadding = {
941 6.0f, 8.0f,
942 };
943
944 std::vector<float> expectedOutputDataNoPadding = {
945 10.5f,
946 };
947
948 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
949
950 // Scale and offset should match input - we're just calculating average values.
951 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
952
953 // Set quantization parameters if the requested type is a quantized type.
954 if(armnn::IsQuantizedType<T>())
955 {
956 inputTensorInfo.SetQuantizationScale(qScale);
957 inputTensorInfo.SetQuantizationOffset(qOffset);
958 outputTensorInfo.SetQuantizationScale(qScale);
959 outputTensorInfo.SetQuantizationOffset(qOffset);
960 }
961
962 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
963
964 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
965 forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
966 QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
967
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000968 return SimplePooling2dTestImpl<T>(
969 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
surmeh01bceff2f2018-03-29 16:29:27 +0100970}
971
972
telsoa014fcda012018-03-09 14:13:49 +0000973template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000974LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
975 armnn::IWorkloadFactory& workloadFactory,
976 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
977 float qScale = 1.0f,
978 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000979{
980 armnn::Pooling2dDescriptor descriptor;
981 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
982 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
983 descriptor.m_StrideX = descriptor.m_StrideY = 2;
984 descriptor.m_PadLeft = 1;
985 descriptor.m_PadRight = 1;
986 descriptor.m_PadTop = 1;
987 descriptor.m_PadBottom = 1;
988 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
989
990 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
991 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
992
993 // Set quantization parameters if the requested type is a quantized type.
994 if(armnn::IsQuantizedType<T>())
995 {
996 inputTensorInfo.SetQuantizationScale(qScale);
997 inputTensorInfo.SetQuantizationOffset(qOffset);
998 outputTensorInfo.SetQuantizationScale(qScale);
999 outputTensorInfo.SetQuantizationOffset(qOffset);
1000 }
1001
1002 auto input = MakeTensor<T, 4>(inputTensorInfo,
1003 QuantizedVector<T>(qScale, qOffset, {
1004 -1.0f, -2.0f, 3.0f, 4.0f,
1005 -1.0f, -2.0f, 3.0f, 4.0f,
1006 1.0f, 2.0f, -3.0f, -4.0f,
1007 1.0f, 2.0f, -3.0f, -4.0f,
1008 }));
1009
1010 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1011 QuantizedVector<T>(qScale, qOffset, {
1012 -1.0f, 3.0f, 4.0f,
1013 1.0f, 3.0f, 4.0f,
1014 1.0f, 2.0f, -4.0f,
1015 }));
1016
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001017 return SimplePooling2dTestImpl<T>(
1018 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001019}
1020
1021template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001022LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1023 armnn::IWorkloadFactory& workloadFactory,
1024 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1025 float qScale = 1.0f,
1026 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001027{
1028 armnn::Pooling2dDescriptor descriptor;
1029 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1030 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1031 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1032 descriptor.m_PadLeft = 1;
1033 descriptor.m_PadRight = 1;
1034 descriptor.m_PadTop = 1;
1035 descriptor.m_PadBottom = 1;
1036 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1037
1038 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1039 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1040
1041 // Set quantization parameters if the requested type is a quantized type.
1042 if(armnn::IsQuantizedType<T>())
1043 {
1044 inputTensorInfo.SetQuantizationScale(qScale);
1045 inputTensorInfo.SetQuantizationOffset(qOffset);
1046 outputTensorInfo.SetQuantizationScale(qScale);
1047 outputTensorInfo.SetQuantizationOffset(qOffset);
1048 }
1049
1050 auto input = MakeTensor<T, 4>(inputTensorInfo,
1051 QuantizedVector<T>(qScale, qOffset, {
1052 -1.0f, -2.0f, 3.0f, 4.0f,
1053 -1.0f, -2.0f, 3.0f, 4.0f,
1054 1.0f, 2.0f, -3.0f, -4.0f,
1055 1.0f, 2.0f, -3.0f, -4.0f,
1056 }));
1057
1058 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1059 QuantizedVector<T>(qScale, qOffset, {
1060 -1.0f, 3.0f, 4.0f, 4.0f,
1061 2.0f, 3.0f, 4.0f, 4.0f,
1062 2.0f, 3.0f, 4.0f, 4.0f,
1063 2.0f, 2.0f, 2.0f, -3.0f,
1064 }));
1065
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001066 return SimplePooling2dTestImpl<T>(
1067 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001068}
1069
1070template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001071LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1072 armnn::IWorkloadFactory& workloadFactory,
1073 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1074 float qScale = 1.0f,
1075 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001076{
1077 armnn::Pooling2dDescriptor descriptor;
1078 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1079 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1080 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1081 descriptor.m_PadLeft = 1;
1082 descriptor.m_PadRight = 1;
1083 descriptor.m_PadTop = 1;
1084 descriptor.m_PadBottom = 1;
1085 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1086
1087 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1088 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
1089
1090 // Set quantization parameters if the requested type is a quantized type.
1091 if(armnn::IsQuantizedType<T>())
1092 {
1093 inputTensorInfo.SetQuantizationScale(qScale);
1094 inputTensorInfo.SetQuantizationOffset(qOffset);
1095 outputTensorInfo.SetQuantizationScale(qScale);
1096 outputTensorInfo.SetQuantizationOffset(qOffset);
1097 }
1098
1099 auto input = MakeTensor<T, 4>(inputTensorInfo,
1100 QuantizedVector<T>(qScale, qOffset, {
1101 12.0f, 20.0f, 32.0f, 40.0f,
1102 12.0f, 20.0f, 32.0f, 40.0f,
1103 12.0f, 20.0f, 32.0f, 40.0f,
1104 12.0f, 20.0f, 32.0f, 40.0f,
1105 }));
1106
1107 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1108 QuantizedVector<T>(qScale, qOffset, {
1109 3.0f, 13.0f, 10.0f,
1110 6.0f, 26.0f, 20.0f,
1111 3.0f, 13.0f, 10.0f,
1112 }));
1113
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001114 return SimplePooling2dTestImpl<T>(
1115 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001116}
1117
1118template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001119LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1120 armnn::IWorkloadFactory& workloadFactory,
1121 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1122 float qScale = 1.0f,
1123 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001124{
1125 armnn::Pooling2dDescriptor descriptor;
1126 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1127 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1128 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1129 descriptor.m_PadLeft = 0;
1130 descriptor.m_PadRight = 0;
1131 descriptor.m_PadTop = 0;
1132 descriptor.m_PadBottom = 0;
1133 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1134 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1135
1136 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>());
1137 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
1138
1139 // Set quantization parameters if the requested type is a quantized type.
1140 if(armnn::IsQuantizedType<T>())
1141 {
1142 inputTensorInfo.SetQuantizationScale(qScale);
1143 inputTensorInfo.SetQuantizationOffset(qOffset);
1144 outputTensorInfo.SetQuantizationScale(qScale);
1145 outputTensorInfo.SetQuantizationOffset(qOffset);
1146 }
1147
1148 auto input = MakeTensor<T, 4>(inputTensorInfo,
1149 QuantizedVector<T>(qScale, qOffset, {
1150 1.0f, 2.0f, 3.0f, 4.0f,
1151 1.0f, 2.0f, 3.0f, 4.0f,
1152 1.0f, 2.0f, 3.0f, 4.0f,
1153 1.0f, 2.0f, 3.0f, 4.0f,
1154 }));
1155
1156 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1157 QuantizedVector<T>(qScale, qOffset, {
1158 2.0f, 3.5f,
1159 2.0f, 3.5f
1160 }));
1161
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001162 return SimplePooling2dTestImpl<T>(
1163 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001164}
1165
1166template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001167LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1168 armnn::IWorkloadFactory& workloadFactory,
1169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1170 float qScale = 1.0f,
1171 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001172{
1173 armnn::Pooling2dDescriptor descriptor;
1174 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1175 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1176 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1177 descriptor.m_PadLeft = 1;
1178 descriptor.m_PadRight = 1;
1179 descriptor.m_PadTop = 1;
1180 descriptor.m_PadBottom = 1;
1181 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1182
1183 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1184 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1185
1186 // Set quantization parameters if the requested type is a quantized type.
1187 if(armnn::IsQuantizedType<T>())
1188 {
1189 inputTensorInfo.SetQuantizationScale(qScale);
1190 inputTensorInfo.SetQuantizationOffset(qOffset);
1191 outputTensorInfo.SetQuantizationScale(qScale);
1192 outputTensorInfo.SetQuantizationOffset(qOffset);
1193 }
1194
1195 auto input = MakeTensor<T, 4>(inputTensorInfo,
1196 QuantizedVector<T>(qScale, qOffset, {
1197 9.0f, 27.0f, 18.0f, 36.0f,
1198 18.0f, 9.0f, 18.0f, 9.0f,
1199 27.0f, 18.0f, 9.0f, 27.0f,
1200 9.0f, 27.0f, 9.0f, 18.0f,
1201 }));
1202
1203 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1204 QuantizedVector<T>(qScale, qOffset, {
1205 7.0f, 11.0f, 13.0f, 9.0f,
1206 12.0f, 17.0f, 19.0f, 13.0f,
1207 12.0f, 16.0f, 16.0f, 10.0f,
1208 9.0f, 11.0f, 12.0f, 7.0f,
1209 }));
1210
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001211 return SimplePooling2dTestImpl<T>(
1212 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001213}
1214
1215template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001216LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1217 armnn::IWorkloadFactory& workloadFactory,
1218 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1219 float qScale = 1.0f,
1220 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001221{
1222 armnn::Pooling2dDescriptor descriptor;
1223 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1224 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1225 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1226 descriptor.m_PadLeft = 1;
1227 descriptor.m_PadRight = 1;
1228 descriptor.m_PadTop = 1;
1229 descriptor.m_PadBottom = 1;
1230 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1231
1232 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1233 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
1234
1235 // Set quantization parameters if the requested type is a quantized type.
1236 if(armnn::IsQuantizedType<T>())
1237 {
1238 inputTensorInfo.SetQuantizationScale(qScale);
1239 inputTensorInfo.SetQuantizationOffset(qOffset);
1240 outputTensorInfo.SetQuantizationScale(qScale);
1241 outputTensorInfo.SetQuantizationOffset(qOffset);
1242 }
1243
1244 auto input = MakeTensor<T, 4>(inputTensorInfo,
1245 QuantizedVector<T>(qScale, qOffset, {
1246 2.0f, 4.0f, 8.0f, 16.0f,
1247 4.0f, 2.0f, 2.0f, 4.0f,
1248 8.0f, 2.0f, 4.0f, 2.0f,
1249 16.0f, 2.0f, 2.0f, 8.0f,
1250 }));
1251
1252 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1253 QuantizedVector<T>(qScale, qOffset, {
1254 1.0f, 4.4721f, 8.0f,
1255 4.4721f, 2.6457f, 2.236f,
1256 8.0f, 1.4142f, 4.0f,
1257 }));
1258
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001259 return SimplePooling2dTestImpl<T>(
1260 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001261}
1262
1263template<typename T>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001264LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1265 armnn::IWorkloadFactory& workloadFactory,
1266 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1267 float qScale = 1.0f,
1268 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001269{
1270 armnn::Pooling2dDescriptor descriptor;
1271 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1272 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1273 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1274 descriptor.m_PadLeft = 1;
1275 descriptor.m_PadRight = 1;
1276 descriptor.m_PadTop = 1;
1277 descriptor.m_PadBottom = 1;
1278 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1279
1280 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1281 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
1282
1283 // Set quantization parameters if the requested type is a quantized type.
1284 if(armnn::IsQuantizedType<T>())
1285 {
1286 inputTensorInfo.SetQuantizationScale(qScale);
1287 inputTensorInfo.SetQuantizationOffset(qOffset);
1288 outputTensorInfo.SetQuantizationScale(qScale);
1289 outputTensorInfo.SetQuantizationOffset(qOffset);
1290 }
1291
1292 auto input = MakeTensor<T, 4>(inputTensorInfo,
1293 QuantizedVector<T>(qScale, qOffset, {
1294 1.0f, 2.0f, 3.0f, 4.0f,
1295 1.0f, 2.0f, 3.0f, 4.0f,
1296 1.0f, 2.0f, 3.0f, 4.0f,
1297 1.0f, 2.0f, 3.0f, 4.0f,
1298 }));
1299
1300 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
1301 QuantizedVector<T>(qScale, qOffset, {
1302 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1303 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1304 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1305 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1306 }));
1307
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001308 return SimplePooling2dTestImpl<T>(
1309 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001310}