//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2dTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/LayerSupport.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>

#include <backendsCommon/WorkloadInfo.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/numeric/conversion/cast.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    boost::ignore_unused(memoryManager);
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();

    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
// Pooling size: 3x3
// Stride: (2,4)
// input size: 8x13
// channels: 2
// batch size: 2
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
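    // Worked example of the arithmetic above (integer division): with padding (PadLeft = PadRight = 3),
    // outputWidth = (8 + 3 + 3 + 2 - 3) / 2 = 6 and outputHeight = (13 + 0 + 0 + 4 - 3) / 4 = 3;
    // without padding, outputWidth = (8 + 2 - 3) / 2 = 3 and outputHeight = 3, matching the
    // 3x6 and 3x3 expected outputs below.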
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    // These were calculated manually.
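    // For instance, in the forceNoPadding case the first output value (8.0f) is the max of the
    // top-left 3x3 window of the first channel: {0, 4, 8, 1, 1, 6, 8, 5, 0}; the negated second
    // channel therefore produces 0.0f at the same position.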
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,

                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
            },
            qScale, qOffset));
    }

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             1.0f,  2.0f,  5.0f,  6.0f,
             3.0f,  4.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             4.0f,  8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        },
        qScale, qOffset));
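    // Each 2x2 window reduces to its maximum, e.g. the top-left window {1, 2, 3, 4} gives 4.0f.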

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             2.0f,  2.0f,  6.0f,  6.0f,
             4.0f,  4.0f,  8.0f,  8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             3.0f,  7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f,  0.0f,
        },
        qScale, qOffset));
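    // Each 2x2 window reduces to its mean, e.g. the top-left window {2, 2, 4, 4} averages to 3.0f.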

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
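    // The 11x13 output shape follows from the usual pooling size formula (assuming floor rounding):
    // ((52 + 50 + 50 - 100) / 5) + 1 = 11 and ((60 + 50 + 50 - 100) / 5) + 1 = 13.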

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec;

    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        },
        qScale, qOffset));
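    // L2 pooling takes the square root of the mean of squares in each window, e.g. the top-left
    // window {1, 7, 1, 7} gives sqrt((1 + 49 + 1 + 49) / 4) = 5.0f.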

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        },
        qScale, qOffset));
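    // Every 3x3 window of this input has squares summing to 81, so each output is sqrt(81 / 9) = 3.0f.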

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

    // These were calculated manually.
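    // With PadLeft = 2 and StrideX = 2, the first window in each output row lies entirely in the
    // padded region and yields 0.0f, while the second covers the real values {1.0f, 3.0f} and
    // yields 3.0f, giving the { 0, 3, 0, 3 } pattern below.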
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            0.0f, 3.0f, 0.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    boost::ignore_unused(memoryManager);
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
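    // With the values above: outputWidth = (16 + 0 + 2 - 3) / 2 = 7 and outputHeight = (32 + 0 + 4 - 3) / 4 = 8.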

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

//
// Tests max pooling with the following parameters:
//
// Pooling size: 2x2
// Stride: (2,2)
// input size: 4x4
// channels: 1
// batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;
    unsigned int inputHeight = 4;

    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
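    // With padding (PadLeft = PadRight = 3): outputWidth = (4 + 3 + 3 + 2 - 2) / 2 = 5 and outputHeight = 2;
    // without padding: outputWidth = (4 + 0 + 0 + 2 - 2) / 2 = 2 and outputHeight = 2.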
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f,  15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
// Pooling size: 3x2
// Stride: (2,2)
// input size: 3x2
// channels: 1
// batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
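    // With padding (PadLeft = PadRight = 1): outputWidth = (3 + 1 + 1 + 2 - 3) / 2 = 2 and outputHeight = 1;
    // without padding: outputWidth = (3 + 0 + 0 + 2 - 3) / 2 = 1 and outputHeight = 1. Because the padding
    // method is IgnoreValue, padded zeros count towards each average, e.g. (0 + 3 + 6 + 0 + 12 + 15) / 6 = 6.0f.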
959 unsigned int channels = 1;
960 unsigned int batchSize = 1;
961
962 std::vector<float> inputData = {
963 3.0f, 6.0f, 9.0f,
964 12.0f, 15.0f, 18.0f,
965 };
966
967 std::vector<float> expectedOutputDataWithPadding = {
968 6.0f, 8.0f,
969 };
970
971 std::vector<float> expectedOutputDataNoPadding = {
972 10.5f,
973 };
974
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000975 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100976
977 // Scale and offset should match input - we're just calculating average values.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000978 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100979
980 // Set quantization parameters if the requested type is a quantized type.
981 if(armnn::IsQuantizedType<T>())
982 {
983 inputTensorInfo.SetQuantizationScale(qScale);
984 inputTensorInfo.SetQuantizationOffset(qOffset);
985 outputTensorInfo.SetQuantizationScale(qScale);
986 outputTensorInfo.SetQuantizationOffset(qOffset);
987 }
988
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100989 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +0100990
991 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100992 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
993 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +0100994
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000995 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000996 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
surmeh01bceff2f2018-03-29 16:29:27 +0100997}
998
999
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001000template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001001LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1002 armnn::IWorkloadFactory& workloadFactory,
1003 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1004 float qScale = 1.0f,
1005 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001006{
1007 armnn::Pooling2dDescriptor descriptor;
1008 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1009 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1010 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1011 descriptor.m_PadLeft = 1;
1012 descriptor.m_PadRight = 1;
1013 descriptor.m_PadTop = 1;
1014 descriptor.m_PadBottom = 1;
1015 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1016
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001017 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1018 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001019
1020 // Set quantization parameters if the requested type is a quantized type.
1021 if(armnn::IsQuantizedType<T>())
1022 {
1023 inputTensorInfo.SetQuantizationScale(qScale);
1024 inputTensorInfo.SetQuantizationOffset(qOffset);
1025 outputTensorInfo.SetQuantizationScale(qScale);
1026 outputTensorInfo.SetQuantizationOffset(qOffset);
1027 }
1028
1029 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001030 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001031 -1.0f, -2.0f, 3.0f, 4.0f,
1032 -1.0f, -2.0f, 3.0f, 4.0f,
1033 1.0f, 2.0f, -3.0f, -4.0f,
1034 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001035 },
1036 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001037
1038 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001039 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001040 -1.0f, 3.0f, 4.0f,
1041 1.0f, 3.0f, 4.0f,
1042 1.0f, 2.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001043 },
1044 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001045
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001046 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001047 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001048}
1049
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001050template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001051LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1052 armnn::IWorkloadFactory& workloadFactory,
1053 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1054 float qScale = 1.0f,
1055 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001056{
1057 armnn::Pooling2dDescriptor descriptor;
1058 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1059 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1060 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1061 descriptor.m_PadLeft = 1;
1062 descriptor.m_PadRight = 1;
1063 descriptor.m_PadTop = 1;
1064 descriptor.m_PadBottom = 1;
1065 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1066
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001067 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1068 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001069
1070 // Set quantization parameters if the requested type is a quantized type.
1071 if(armnn::IsQuantizedType<T>())
1072 {
1073 inputTensorInfo.SetQuantizationScale(qScale);
1074 inputTensorInfo.SetQuantizationOffset(qOffset);
1075 outputTensorInfo.SetQuantizationScale(qScale);
1076 outputTensorInfo.SetQuantizationOffset(qOffset);
1077 }
1078
1079 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001080 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001081 -1.0f, -2.0f, 3.0f, 4.0f,
1082 -1.0f, -2.0f, 3.0f, 4.0f,
1083 1.0f, 2.0f, -3.0f, -4.0f,
1084 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001085 },
1086 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001087
1088 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001089 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001090 -1.0f, 3.0f, 4.0f, 4.0f,
1091 2.0f, 3.0f, 4.0f, 4.0f,
1092 2.0f, 3.0f, 4.0f, 4.0f,
1093 2.0f, 2.0f, 2.0f, -3.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001094 },
1095 qScale, qOffset));
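    // A 3x3 pool with stride 1 and 1-element padding keeps the 4x4 shape:
    // (4 + 1 + 1 - 3) / 1 + 1 = 4. As in the 2x2 case above, the padded positions do not
    // win the max; the top-left window sees {-1, -2, -1, -2} and yields -1.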
telsoa014fcda012018-03-09 14:13:49 +00001096
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001097 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001098 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001099}
1100
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001101template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001102LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1103 armnn::IWorkloadFactory& workloadFactory,
1104 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1105 float qScale = 1.0f,
1106 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001107{
1108 armnn::Pooling2dDescriptor descriptor;
1109 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1110 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1111 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1112 descriptor.m_PadLeft = 1;
1113 descriptor.m_PadRight = 1;
1114 descriptor.m_PadTop = 1;
1115 descriptor.m_PadBottom = 1;
1116 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1117
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001118 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1119 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001120
1121 // Set quantization parameters if the requested type is a quantized type.
1122 if(armnn::IsQuantizedType<T>())
1123 {
1124 inputTensorInfo.SetQuantizationScale(qScale);
1125 inputTensorInfo.SetQuantizationOffset(qOffset);
1126 outputTensorInfo.SetQuantizationScale(qScale);
1127 outputTensorInfo.SetQuantizationOffset(qOffset);
1128 }
1129
1130 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001131 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001132 12.0f, 20.0f, 32.0f, 40.0f,
1133 12.0f, 20.0f, 32.0f, 40.0f,
1134 12.0f, 20.0f, 32.0f, 40.0f,
1135 12.0f, 20.0f, 32.0f, 40.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001136 },
1137 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001138
1139 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001140 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001141 3.0f, 13.0f, 10.0f,
1142 6.0f, 26.0f, 20.0f,
1143 3.0f, 13.0f, 10.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001144 },
1145 qScale, qOffset));
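    // With PaddingMethod::IgnoreValue the padded elements act as zeros that still count
    // towards the 2x2 divisor: the top-left window overlaps only input(0,0) = 12, giving
    // 12 / 4 = 3, and the centre window averages (20 + 32 + 20 + 32) / 4 = 26, matching
    // the expected values above.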
telsoa014fcda012018-03-09 14:13:49 +00001146
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001147 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001148 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001149}
1150
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001151template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001152LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1153 armnn::IWorkloadFactory& workloadFactory,
1154 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1155 float qScale = 1.0f,
1156 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001157{
1158 armnn::Pooling2dDescriptor descriptor;
1159 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1160 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1161 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1162 descriptor.m_PadLeft = 0;
1163 descriptor.m_PadRight = 0;
1164 descriptor.m_PadTop = 0;
1165 descriptor.m_PadBottom = 0;
1166 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1167 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1168
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001169 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1170 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001171
1172 // Set quantization parameters if the requested type is a quantized type.
1173 if(armnn::IsQuantizedType<T>())
1174 {
1175 inputTensorInfo.SetQuantizationScale(qScale);
1176 inputTensorInfo.SetQuantizationOffset(qOffset);
1177 outputTensorInfo.SetQuantizationScale(qScale);
1178 outputTensorInfo.SetQuantizationOffset(qOffset);
1179 }
1180
1181 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001182 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001183 1.0f, 2.0f, 3.0f, 4.0f,
1184 1.0f, 2.0f, 3.0f, 4.0f,
1185 1.0f, 2.0f, 3.0f, 4.0f,
1186 1.0f, 2.0f, 3.0f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001187 },
1188 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001189
1190 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001191 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001192 2.0f, 3.5f,
1193 2.0f, 3.5f
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001194 },
1195 qScale, qOffset));
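    // OutputShapeRounding::Ceiling gives ceil((4 - 3) / 2) + 1 = 2 outputs per dimension,
    // so the second window starts at column 2 and runs past the right edge. Judging from
    // the expected data, only the in-range elements contribute there:
    // (3 + 4) * 3 / 6 = 3.5, while the fully in-range window gives (1 + 2 + 3) * 3 / 9 = 2.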
telsoa014fcda012018-03-09 14:13:49 +00001196
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001197 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001198 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001199}
1200
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001201template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001202LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1203 armnn::IWorkloadFactory& workloadFactory,
1204 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1205 float qScale = 1.0f,
1206 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001207{
1208 armnn::Pooling2dDescriptor descriptor;
1209 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1210 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1211 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1212 descriptor.m_PadLeft = 1;
1213 descriptor.m_PadRight = 1;
1214 descriptor.m_PadTop = 1;
1215 descriptor.m_PadBottom = 1;
1216 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1217
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001218 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1219 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001220
1221 // Set quantization parameters if the requested type is a quantized type.
1222 if(armnn::IsQuantizedType<T>())
1223 {
1224 inputTensorInfo.SetQuantizationScale(qScale);
1225 inputTensorInfo.SetQuantizationOffset(qOffset);
1226 outputTensorInfo.SetQuantizationScale(qScale);
1227 outputTensorInfo.SetQuantizationOffset(qOffset);
1228 }
1229
1230 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001231 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001232 9.0f, 27.0f, 18.0f, 36.0f,
1233 18.0f, 9.0f, 18.0f, 9.0f,
1234 27.0f, 18.0f, 9.0f, 27.0f,
1235 9.0f, 27.0f, 9.0f, 18.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001236 },
1237 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001238
1239 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001240 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001241 7.0f, 11.0f, 13.0f, 9.0f,
1242 12.0f, 17.0f, 19.0f, 13.0f,
1243 12.0f, 16.0f, 16.0f, 10.0f,
1244 9.0f, 11.0f, 12.0f, 7.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001245 },
1246 qScale, qOffset));
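    // Under IgnoreValue padding the divisor is always the full 3x3 = 9 window, with the
    // padded positions counted as zeros: the top-left output is (9 + 27 + 18 + 9) / 9 = 7
    // and the output at (1,1) is the sum of the top-left 3x3 block, 153 / 9 = 17.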
telsoa014fcda012018-03-09 14:13:49 +00001247
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001248 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001249 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001250}
1251
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001252template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001253LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1254 armnn::IWorkloadFactory& workloadFactory,
1255 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1256 float qScale = 1.0f,
1257 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001258{
1259 armnn::Pooling2dDescriptor descriptor;
1260 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1261 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1262 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1263 descriptor.m_PadLeft = 1;
1264 descriptor.m_PadRight = 1;
1265 descriptor.m_PadTop = 1;
1266 descriptor.m_PadBottom = 1;
1267 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1268
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001269 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1270 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001271
1272 // Set quantization parameters if the requested type is a quantized type.
1273 if(armnn::IsQuantizedType<T>())
1274 {
1275 inputTensorInfo.SetQuantizationScale(qScale);
1276 inputTensorInfo.SetQuantizationOffset(qOffset);
1277 outputTensorInfo.SetQuantizationScale(qScale);
1278 outputTensorInfo.SetQuantizationOffset(qOffset);
1279 }
1280
1281 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001282 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001283 2.0f, 4.0f, 8.0f, 16.0f,
1284 4.0f, 2.0f, 2.0f, 4.0f,
1285 8.0f, 2.0f, 4.0f, 2.0f,
1286 16.0f, 2.0f, 2.0f, 8.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001287 },
1288 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001289
1290 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001291 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001292 1.0f, 4.4721f, 8.0f,
1293 4.4721f, 2.6457f, 2.236f,
1294 8.0f, 1.4142f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001295 },
1296 qScale, qOffset));
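    // L2 pooling takes the square root of the mean of the squared values, and with
    // IgnoreValue padding the padded zeros count towards the 2x2 divisor: the top-left
    // window overlaps only input(0,0) = 2, giving sqrt(4 / 4) = 1, and the next window
    // gives sqrt((16 + 64) / 4) = sqrt(20) ~= 4.4721.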
telsoa014fcda012018-03-09 14:13:49 +00001297
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001298 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001299 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001300}
1301
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001302template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001303LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1304 armnn::IWorkloadFactory& workloadFactory,
1305 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1306 float qScale = 1.0f,
1307 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001308{
1309 armnn::Pooling2dDescriptor descriptor;
1310 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1311 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1312 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1313 descriptor.m_PadLeft = 1;
1314 descriptor.m_PadRight = 1;
1315 descriptor.m_PadTop = 1;
1316 descriptor.m_PadBottom = 1;
1317 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1318
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001319 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1320 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001321
1322 // Set quantization parameters if the requested type is a quantized type.
1323 if(armnn::IsQuantizedType<T>())
1324 {
1325 inputTensorInfo.SetQuantizationScale(qScale);
1326 inputTensorInfo.SetQuantizationOffset(qOffset);
1327 outputTensorInfo.SetQuantizationScale(qScale);
1328 outputTensorInfo.SetQuantizationOffset(qOffset);
1329 }
1330
1331 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001332 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001333 1.0f, 2.0f, 3.0f, 4.0f,
1334 1.0f, 2.0f, 3.0f, 4.0f,
1335 1.0f, 2.0f, 3.0f, 4.0f,
1336 1.0f, 2.0f, 3.0f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001337 },
1338 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001339
1340 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001341 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001342 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1343 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1344 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1345 1.0540f, 1.7638f, 2.5385f, 2.3570f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001346 },
1347 qScale, qOffset));
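    // Same idea with a 3x3 window and stride 1: the divisor stays at 9 even where the
    // window overlaps padding, e.g. the top-left output is
    // sqrt((1 + 4 + 1 + 4) / 9) = sqrt(10 / 9) ~= 1.054 and the output at (1,1) is
    // sqrt((1 + 4 + 9) * 3 / 9) ~= 2.1602.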
telsoa014fcda012018-03-09 14:13:49 +00001348
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001349 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001350 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001351}
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001352
1353} // anonymous namespace
1354
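// The functions below are thin, non-template entry points that pin the templated helpers
// above to a concrete data type (Float32, QAsymmU8 or QSymmS16). Backend test suites are
// expected to drive them through their own registration mechanism; a sketch of the usual
// pattern (the macro name and arguments here are an assumption about the callers, not
// something defined in this file) would be:
//
//     ARMNN_AUTO_TEST_CASE(SimpleMaxPooling2dSize2x2Stride2x2,
//                          SimpleMaxPooling2dSize2x2Stride2x2Test,
//                          false /* forceNoPadding */)
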
1355LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
1356 armnn::IWorkloadFactory& workloadFactory,
1357 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1358 bool forceNoPadding)
1359{
1360 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1361 workloadFactory, memoryManager, forceNoPadding);
1362}
1363
1364LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
1365 armnn::IWorkloadFactory& workloadFactory,
1366 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1367 bool forceNoPadding)
1368{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001369 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001370 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1371}
1372
1373LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
1374 armnn::IWorkloadFactory& workloadFactory,
1375 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1376 bool forceNoPadding)
1377{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001378 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001379 workloadFactory, memoryManager, forceNoPadding);
1380}
1381
1382LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
1383 armnn::IWorkloadFactory& workloadFactory,
1384 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1385 bool forceNoPadding)
1386{
1387 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1388 workloadFactory, memoryManager, forceNoPadding);
1389}
1390
1391LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
1392 armnn::IWorkloadFactory& workloadFactory,
1393 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1394 bool forceNoPadding)
1395{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001396 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001397 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1398}
1399
1400LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
1401 armnn::IWorkloadFactory& workloadFactory,
1402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1403 bool forceNoPadding)
1404{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001405 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001406 workloadFactory, memoryManager, forceNoPadding);
1407}
1408
1409LayerTestResult<float, 4> SimpleMaxPooling2dTest(
1410 armnn::IWorkloadFactory& workloadFactory,
1411 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1412 const armnn::DataLayout dataLayout)
1413{
1414 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1415}
1416
1417LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
1418 armnn::IWorkloadFactory& workloadFactory,
1419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1420 const armnn::DataLayout dataLayout)
1421{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001422 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001423}
1424
1425LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
1426 armnn::IWorkloadFactory& workloadFactory,
1427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1428 const armnn::DataLayout dataLayout)
1429{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001430 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001431}

1432LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
1433 armnn::IWorkloadFactory& workloadFactory,
1434 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1435{
1436 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1437}
1438
1439LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
1440 armnn::IWorkloadFactory& workloadFactory,
1441 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1442{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001443 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001444 workloadFactory, memoryManager, 1.0f, -5);
1445}
1446
1447LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
1448 armnn::IWorkloadFactory& workloadFactory,
1449 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1450{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001451 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001452 workloadFactory, memoryManager);
1453}
1454
1455LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
1456 armnn::IWorkloadFactory& workloadFactory,
1457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1458{
1459 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1460}
1461
1462LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
1463 armnn::IWorkloadFactory& workloadFactory,
1464 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1465{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001466 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001467 workloadFactory, memoryManager, 1.0f, -5);
1468}
1469
1470LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
1471 armnn::IWorkloadFactory& workloadFactory,
1472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1473{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001474 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001475 workloadFactory, memoryManager);
1476}
1477
1478LayerTestResult<float, 4> SimpleAveragePooling2dTest(
1479 armnn::IWorkloadFactory& workloadFactory,
1480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1481 const armnn::DataLayout dataLayout)
1482{
1483 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1484}
1485
1486LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
1487 armnn::IWorkloadFactory& workloadFactory,
1488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489 const armnn::DataLayout dataLayout)
1490{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001491 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001492 workloadFactory, memoryManager, dataLayout, 0.5, -1);
1493}
1494
1495LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
1496 armnn::IWorkloadFactory& workloadFactory,
1497 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1498 const armnn::DataLayout dataLayout)
1499{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001500 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001501 workloadFactory, memoryManager, dataLayout);
1502}
1503
1504LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
1505 armnn::IWorkloadFactory& workloadFactory,
1506 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1507 bool forceNoPadding)
1508{
1509 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1510 workloadFactory, memoryManager, forceNoPadding);
1511}
1512
1513LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
1514 armnn::IWorkloadFactory& workloadFactory,
1515 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1516{
1517 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1518}
1519
1520LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
1521 armnn::IWorkloadFactory& workloadFactory,
1522 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1523{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001524 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001525 workloadFactory, memoryManager, 0.5, -1);
1526}
1527
1528LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
1529 armnn::IWorkloadFactory& workloadFactory,
1530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1531{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001532 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001533 workloadFactory, memoryManager);
1534}

1535LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
1536 armnn::IWorkloadFactory& workloadFactory,
1537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1538{
1539 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1540}
1541
1542LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
1543 armnn::IWorkloadFactory& workloadFactory,
1544 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1545{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001546 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001547 workloadFactory, memoryManager);
1548}
1549
1550LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
1551 armnn::IWorkloadFactory& workloadFactory,
1552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1553{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001554 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001555 workloadFactory, memoryManager);
1556}
1557
1558LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
1559 armnn::IWorkloadFactory& workloadFactory,
1560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1561{
1562 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1563 workloadFactory, memoryManager);
1564}
1565
1566LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
1567 armnn::IWorkloadFactory& workloadFactory,
1568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1569{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001570 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001571 workloadFactory, memoryManager);
1572}
1573
1574LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
1575 armnn::IWorkloadFactory& workloadFactory,
1576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1577{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001578 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001579 workloadFactory, memoryManager);
1580}
1581
1582LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
1583 armnn::IWorkloadFactory& workloadFactory,
1584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1585{
1586 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1587}
1588
1589LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
1590 armnn::IWorkloadFactory& workloadFactory,
1591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1592{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001593 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001594 workloadFactory, memoryManager);
1595}
1596
1597LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
1598 armnn::IWorkloadFactory& workloadFactory,
1599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1600{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001601 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001602 workloadFactory, memoryManager);
1603}
1604
1605LayerTestResult<float, 4> SimpleL2Pooling2dTest(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1608 const armnn::DataLayout dataLayout)
1609{
1610 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1611}
1612
1613LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
1614 armnn::IWorkloadFactory& workloadFactory,
1615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1616 const armnn::DataLayout dataLayout)
1617{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001618 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001619}
1620
1621LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
1622 armnn::IWorkloadFactory& workloadFactory,
1623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1624 const armnn::DataLayout dataLayout)
1625{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001626 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001627}
1628
1629LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
1630 armnn::IWorkloadFactory& workloadFactory,
1631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1632{
1633 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1634}
1635
1636LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
1637 armnn::IWorkloadFactory& workloadFactory,
1638 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1639{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001640 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001641}
1642
1643LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
1644 armnn::IWorkloadFactory& workloadFactory,
1645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1646{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001647 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001648}
1649
1650LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
1651 armnn::IWorkloadFactory& workloadFactory,
1652 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1653{
1654 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1655}
1656
1657LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
1658 armnn::IWorkloadFactory& workloadFactory,
1659 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1660{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001661 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001662}
1663
1664LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
1665 armnn::IWorkloadFactory& workloadFactory,
1666 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1667{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001668 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001669}

1670LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
1671 armnn::IWorkloadFactory& workloadFactory,
1672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1673{
1674 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1675}
1676
1677LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
1678 armnn::IWorkloadFactory& workloadFactory,
1679 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1680{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001681 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001682}
1683
1684LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
1685 armnn::IWorkloadFactory& workloadFactory,
1686 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1687{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001688 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001689}
1690
1691LayerTestResult<float, 4> L2Pooling2dSize7Test(
1692 armnn::IWorkloadFactory& workloadFactory,
1693 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1694{
1695 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1696}
1697
1698LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
1699 armnn::IWorkloadFactory& workloadFactory,
1700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1701{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001702 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001703}
1704
1705LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
1706 armnn::IWorkloadFactory& workloadFactory,
1707 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1708{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001709 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001710}
1711
1712LayerTestResult<float, 4> L2Pooling2dSize9Test(
1713 armnn::IWorkloadFactory& workloadFactory,
1714 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1715{
1716 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1717}
1718
1719LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
1720 armnn::IWorkloadFactory& workloadFactory,
1721 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1722{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001723 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001724}
1725
1726LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
1727 armnn::IWorkloadFactory& workloadFactory,
1728 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1729{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001730 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001731}

1732LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
1733 armnn::IWorkloadFactory& workloadFactory,
1734 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1735{
1736 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1737}
1738
1739LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
1740 armnn::IWorkloadFactory& workloadFactory,
1741 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1742{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001743 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001744}
1745
1746LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
1747 armnn::IWorkloadFactory& workloadFactory,
1748 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1749{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001750 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001751}
1752
1753LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
1754 armnn::IWorkloadFactory& workloadFactory,
1755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1756{
1757 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1758}
1759
1760LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
1761 armnn::IWorkloadFactory& workloadFactory,
1762 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1763{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001764 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001765}
1766
1767LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
1768 armnn::IWorkloadFactory& workloadFactory,
1769 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1770{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001771 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001772}
1773
1774LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
1775 armnn::IWorkloadFactory& workloadFactory,
1776 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1777{
1778 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1779}
1780
1781LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
1782 armnn::IWorkloadFactory& workloadFactory,
1783 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1784{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001785 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001786}
1787
1788LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
1789 armnn::IWorkloadFactory& workloadFactory,
1790 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1791{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001792 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001793}
1794
1795LayerTestResult<float, 4> ComparePooling2dTest(
1796 armnn::IWorkloadFactory& workloadFactory,
1797 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1798 armnn::IWorkloadFactory& refWorkloadFactory,
1799 armnn::PoolingAlgorithm poolingType)
1800{
1801 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1802 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1803}
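// As the refWorkloadFactory parameter suggests, the Compare* variants run the same pooling
// configuration on both the backend under test and a reference workload factory and check
// the two outputs against each other; the quantized variants below do the same with
// explicit scale/offset values.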
1804
1805LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
1806 armnn::IWorkloadFactory& workloadFactory,
1807 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1808 armnn::IWorkloadFactory& refWorkloadFactory,
1809 armnn::PoolingAlgorithm poolingType)
1810{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001811 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001812 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1813}
1814
1815LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
1816 armnn::IWorkloadFactory& workloadFactory,
1817 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1818 armnn::IWorkloadFactory& refWorkloadFactory,
1819 armnn::PoolingAlgorithm poolingType)
1820{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001821 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001822 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1823}