//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2dTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/LayerSupport.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/WorkloadInfo.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/numeric/conversion/cast.hpp>

namespace
{

using namespace armnnUtils;

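// Shared helper used by all the tests below: it derives tensor infos from the supplied input
// and expected-output tensors, applies the quantization parameters, checks IsPooling2dSupported
// for the factory's backend (returning early with supported == false when the configuration is
// rejected), then creates, executes and reads back a single Pooling2d workload.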
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    IgnoreUnused(memoryManager);
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();

    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
// Pooling size: 3x3
// Stride: (2,4)
// input size: 8x13
// channels: 2
// batch size: 2
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
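    // For reference, with floor rounding this gives:
    //   padded (3,3):   outputWidth  = (8 + 3 + 3 + 2 - 3) / 2 = 6
    //   forceNoPadding: outputWidth  = (8 + 0 + 0 + 2 - 3) / 2 = 3
    //   either case:    outputHeight = (13 + 0 + 0 + 4 - 3) / 4 = 3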
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    // These were calculated manually.
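    // For example, with forceNoPadding the first output element is the max of the top-left
    // 3x3 window of the first channel, { 0, 4, 8, 1, 1, 6, 8, 5, 0 }, i.e. 8.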
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                8.0f, 8.0f, 8.0f,
                9.0f, 7.0f, 9.0f,
                9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, -3.0f,
                -1.0f, 0.0f, 0.0f,
                -1.0f, -1.0f, -1.0f,

                8.0f, 8.0f, 8.0f,
                9.0f, 7.0f, 9.0f,
                9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, -3.0f,
                -1.0f, 0.0f, 0.0f,
                -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f,

                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f,-3.0f,-3.0f,
                0.0f,-1.0f, 0.0f, 0.0f, 0.0f,-2.0f,
                0.0f,-1.0f,-1.0f,-1.0f,-1.0f,-1.0f
            },
            qScale, qOffset));
    }

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 2.0f, 5.0f, 6.0f,
            3.0f, 4.0f, 7.0f, 8.0f,
            9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        },
        qScale, qOffset));

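    // Expected results: the max of each non-overlapping 2x2 window,
    // e.g. the top-left window { 1, 2, 3, 4 } of the first channel gives 4.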
    std::vector<T> outputData(
        QuantizedVector<T>({
            4.0f, 8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
            2.0f, 2.0f, 6.0f, 6.0f,
            4.0f, 4.0f, 8.0f, 8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f, 0.0f, 0.0f,
            26.0f, 28.0f, 0.0f, 0.0f,
        },
        qScale, qOffset));

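    // Expected results: the mean of each non-overlapping 2x2 window,
    // e.g. the top-left window { 2, 2, 4, 4 } of the first channel gives 3.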
    std::vector<T> outputData(
        QuantizedVector<T>({
            3.0f, 7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f, 0.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
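    // The output shape follows the same floor-rounded formula used by the other tests:
    //   height: (52 + 50 + 50 + 5 - 100) / 5 = 11,  width: (60 + 50 + 50 + 5 - 100) / 5 = 13.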

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec;

    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        },
        qScale, qOffset));

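    // Expected results: L2 (root-mean-square) pooling of each 2x2 window, e.g. the top-left
    // window { 1, 7, 1, 7 } of the first channel gives sqrt((1 + 49 + 1 + 49) / 4) = 5.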
    std::vector<T> outputData(
        QuantizedVector<T>({
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
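    // Each expected value is the RMS over a 3x3 window, e.g. the top-left window
    // { 2, 1, 5, 1, 2, 2, 5, 4, 1 } gives sqrt(81 / 9) = 3.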
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

    // These were calculated manually.
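    // With Exclude padding, windows that cover only padding contribute no valid values and
    // come out as 0 here; the windows that reach the real data see at most { 1, 3 }, max 3.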
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            0.0f, 3.0f, 0.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
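    // For these constants: outputWidth = (16 + 0 + 2 - 3) / 2 = 7 and outputHeight = (32 + 0 + 4 - 3) / 4 = 8.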

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

//
// Tests max pooling with the following parameters:
//
// Pooling size: 2x2
// Stride: (2,2)
// input size: 4x4
// channels: 1
// batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;
    unsigned int inputHeight = 4;

    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
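    // For reference: with padding (3,3) this gives outputWidth = (4 + 3 + 3 + 2 - 2) / 2 = 5 and
    // without padding (4 + 0 + 0 + 2 - 2) / 2 = 2; outputHeight = (4 + 0 + 0 + 2 - 2) / 2 = 2 either way.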
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f, 15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
// Pooling size: 3x2
// Stride: (2,2)
// input size: 3x2
// channels: 1
// batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
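    // For reference: with padding (1,1) outputWidth = (3 + 1 + 1 + 2 - 3) / 2 = 2; without
    // padding (3 + 0 + 0 + 2 - 3) / 2 = 1; outputHeight = (2 + 0 + 0 + 2 - 2) / 2 = 1 in both cases.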
961 unsigned int channels = 1;
962 unsigned int batchSize = 1;
963
964 std::vector<float> inputData = {
965 3.0f, 6.0f, 9.0f,
966 12.0f, 15.0f, 18.0f,
967 };
968
969 std::vector<float> expectedOutputDataWithPadding = {
970 6.0f, 8.0f,
971 };
972
973 std::vector<float> expectedOutputDataNoPadding = {
974 10.5f,
975 };
976
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000977 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100978
979 // Scale and offset should match input - we're just calculating average values.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000980 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100981
982 // Set quantization parameters if the requested type is a quantized type.
983 if(armnn::IsQuantizedType<T>())
984 {
985 inputTensorInfo.SetQuantizationScale(qScale);
986 inputTensorInfo.SetQuantizationOffset(qOffset);
987 outputTensorInfo.SetQuantizationScale(qScale);
988 outputTensorInfo.SetQuantizationOffset(qOffset);
989 }
990
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100991 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +0100992
993 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100994 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
995 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +0100996
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000997 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000998 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
surmeh01bceff2f2018-03-29 16:29:27 +0100999}
1000
1001
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001002template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001003LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1004 armnn::IWorkloadFactory& workloadFactory,
1005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1006 float qScale = 1.0f,
1007 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001008{
1009 armnn::Pooling2dDescriptor descriptor;
1010 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1011 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1012 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1013 descriptor.m_PadLeft = 1;
1014 descriptor.m_PadRight = 1;
1015 descriptor.m_PadTop = 1;
1016 descriptor.m_PadBottom = 1;
1017 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1018
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001019 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1020 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001021
1022 // Set quantization parameters if the requested type is a quantized type.
1023 if(armnn::IsQuantizedType<T>())
1024 {
1025 inputTensorInfo.SetQuantizationScale(qScale);
1026 inputTensorInfo.SetQuantizationOffset(qOffset);
1027 outputTensorInfo.SetQuantizationScale(qScale);
1028 outputTensorInfo.SetQuantizationOffset(qOffset);
1029 }
1030
1031 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001032 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001033 -1.0f, -2.0f, 3.0f, 4.0f,
1034 -1.0f, -2.0f, 3.0f, 4.0f,
1035 1.0f, 2.0f, -3.0f, -4.0f,
1036 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001037 },
1038 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001039
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            -1.0f, 3.0f, 4.0f,
            1.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -4.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            -1.0f, -2.0f, 3.0f, 4.0f,
            -1.0f, -2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
        },
        qScale, qOffset));

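    // Expected output: a 3x3 window with stride 1 and 1-pixel padding preserves the 4x4 shape.
    // As above, the zero border does not take part in the max - e.g. the bottom-right window
    // sees only { -3.0f, -4.0f, -3.0f, -4.0f } and yields -3.0f.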
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            -1.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 2.0f, 2.0f, -3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
        },
        qScale, qOffset));

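    // Expected output: with PaddingMethod::IgnoreValue the zero border does contribute to the
    // average - e.g. the top-left window is { 0, 0, 0, 12 } giving 3.0f, while the centre
    // window { 20, 32, 20, 32 } gives 26.0f.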
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 13.0f, 10.0f,
            6.0f, 26.0f, 20.0f,
            3.0f, 13.0f, 10.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

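    // Expected output: no padding, a 3x3 window, stride 2 and Ceiling rounding give a 2x2
    // result. The left windows average the columns { 1, 2, 3 } to 2.0f; the right windows
    // overhang the input and, judging by the expected values, only the in-range columns
    // { 3, 4 } contribute, giving 3.5f.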
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            2.0f, 3.5f,
            2.0f, 3.5f
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            9.0f, 27.0f, 18.0f, 36.0f,
            18.0f, 9.0f, 18.0f, 9.0f,
            27.0f, 18.0f, 9.0f, 27.0f,
            9.0f, 27.0f, 9.0f, 18.0f,
        },
        qScale, qOffset));

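    // Expected output: the zero border counts towards the fixed 3x3 divisor - e.g. the
    // top-left window holds { 9, 27, 18, 9 } plus five zeros, i.e. 63 / 9 = 7.0f.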
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            7.0f, 11.0f, 13.0f, 9.0f,
            12.0f, 17.0f, 19.0f, 13.0f,
            12.0f, 16.0f, 16.0f, 10.0f,
            9.0f, 11.0f, 12.0f, 7.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 4.0f, 8.0f, 16.0f,
            4.0f, 2.0f, 2.0f, 4.0f,
            8.0f, 2.0f, 4.0f, 2.0f,
            16.0f, 2.0f, 2.0f, 8.0f,
        },
        qScale, qOffset));

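    // Expected output: L2 pooling is the square root of the mean of squares over the window,
    // and the zero border counts towards the divisor - e.g. the top-left window { 0, 0, 0, 2 }
    // gives sqrt(4 / 4) = 1.0f and the centre window { 2, 2, 2, 4 } gives sqrt(28 / 4) ~= 2.6457f.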
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            1.0f, 4.4721f, 8.0f,
            4.4721f, 2.6457f, 2.236f,
            8.0f, 1.4142f, 4.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

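    // Expected output: same calculation with a 3x3 window and stride 1 - e.g. the top-left
    // window holds { 1, 2, 1, 2 } plus five zeros, so the expected value is sqrt(10 / 9) ~= 1.0540f.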
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

} // anonymous namespace

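//
// Non-template entry points. Each function below simply instantiates the matching *TestCommon
// template for a concrete data type (Float32, QAsymmU8 or QSymmS16), forwarding any extra
// quantisation scale/offset the quantized variants require.
//
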
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, dataLayout, 0.5, -1);
}

LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, 0.5, -1);
}

LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
}

LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
}

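// The Compare* entry points take a second, reference workload factory so that the pooling result
// produced by the backend under test can be compared against a reference implementation;
// the Uint8 variant also pins the quantisation parameters to 0.1f / 128.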
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, refWorkloadFactory, poolingType);
}