//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2dTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/LayerSupport.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/WorkloadInfo.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/numeric/conversion/cast.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    IgnoreUnused(memoryManager);
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();
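    // For reference: DataLayoutIndexed resolves these indices per layout (assumption based on the
    // armnnUtils helper): NCHW -> channels = 1, height = 2, width = 3; NHWC -> height = 1, width = 2, channels = 3.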

    unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels   = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize  = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
//   Pooling size: 3x3
//   Stride: (2,4)
//   input size: 8x13
//   channels: 2
//   batch size: 2
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
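    // Worked example of the shape arithmetic above (integer division rounds down):
    //   with padding:   width = (8 + 3 + 3 + 2 - 3) / 2 = 6,  height = (13 + 0 + 0 + 4 - 3) / 4 = 3
    //   forceNoPadding: width = (8 + 0 + 0 + 2 - 3) / 2 = 3,  height = (13 + 0 + 0 + 4 - 3) / 4 = 3
    // which matches the 3x6 and 3x3 expected outputs defined below.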
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                 0.0f,  8.0f,  8.0f,  8.0f,  8.0f,  8.0f,
                 0.0f,  9.0f,  7.0f,  9.0f,  9.0f,  3.0f,
                 0.0f,  8.0f,  9.0f,  9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f,  0.0f,  0.0f, -3.0f, -3.0f,
                 0.0f, -1.0f,  0.0f,  0.0f,  0.0f, -2.0f,
                 0.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,

                 0.0f,  8.0f,  8.0f,  8.0f,  8.0f,  8.0f,
                 0.0f,  9.0f,  7.0f,  9.0f,  9.0f,  3.0f,
                 0.0f,  8.0f,  9.0f,  9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f,  0.0f,  0.0f, -3.0f, -3.0f,
                 0.0f, -1.0f,  0.0f,  0.0f,  0.0f, -2.0f,
                 0.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             1.0f,  2.0f,  5.0f,  6.0f,
             3.0f,  4.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             4.0f,  8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
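    // The test data above is written in NCHW order; when the test runs in NHWC the same values are
    // permuted below. Assumption about armnn's PermutationVector convention: each element gives the
    // destination index of the corresponding source dimension, so { 0, 3, 1, 2 } sends N->0, C->3,
    // H->1, W->2, i.e. rearranges NCHW-ordered data into NHWC.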
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             2.0f,  2.0f,  6.0f,  6.0f,
             4.0f,  4.0f,  8.0f,  8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             3.0f,  7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f,  0.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);
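    // Rough shape check (assuming the usual floor((in + padBefore + padAfter - pool) / stride) + 1 rule):
    //   height = (52 + 50 + 50 - 100) / 5 + 1 = 11,  width = (60 + 50 + 50 - 100) / 5 + 1 = 13.
    // With an all-ones input and PaddingMethod::Exclude (padding not counted in the divisor),
    // every averaged window is exactly 1, which is why the expected output below is all ones.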

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec;

    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo  = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

    // These were calculated manually.
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            0.0f, 3.0f, 0.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth  = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

//
// Tests max pooling with the following parameters:
//
//   Pooling size: 2x2
//   Stride: (2,2)
//   input size: 4x4
//   channels: 1
//   batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;
    unsigned int inputHeight = 4;

    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
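    // Worked example of the shape arithmetic above (integer division rounds down):
    //   with padding:   width = (4 + 3 + 3 + 2 - 2) / 2 = 5,  height = (4 + 0 + 0 + 2 - 2) / 2 = 2
    //   forceNoPadding: width = (4 + 0 + 0 + 2 - 2) / 2 = 2,  height = 2
    // matching the 2x5 and 2x2 expected outputs defined below.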
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f,  15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
//   Pooling size: 3x2
//   Stride: (2,2)
//   input size: 3x2
//   channels: 1
//   batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
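    // Worked example of the shape arithmetic above:
    //   with padding:   width = (3 + 1 + 1 + 2 - 3) / 2 = 2,  height = (2 + 0 + 0 + 2 - 2) / 2 = 1
    //   forceNoPadding: width = (3 + 0 + 0 + 2 - 3) / 2 = 1,  height = 1
    // With PaddingMethod::IgnoreValue the padded zeros count towards the divisor, so the padded
    // expected outputs below are (3+6+12+15)/6 = 6 and (6+9+15+18)/6 = 8, while the unpadded case
    // averages all six elements: 63/6 = 10.5.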
967 unsigned int channels = 1;
968 unsigned int batchSize = 1;
969
970 std::vector<float> inputData = {
971 3.0f, 6.0f, 9.0f,
972 12.0f, 15.0f, 18.0f,
973 };
974
975 std::vector<float> expectedOutputDataWithPadding = {
976 6.0f, 8.0f,
977 };
978
979 std::vector<float> expectedOutputDataNoPadding = {
980 10.5f,
981 };
982
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000983 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100984
985 // Scale and offset should match input - we're just calculating average values.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000986 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100987
988 // Set quantization parameters if the requested type is a quantized type.
989 if(armnn::IsQuantizedType<T>())
990 {
991 inputTensorInfo.SetQuantizationScale(qScale);
992 inputTensorInfo.SetQuantizationOffset(qOffset);
993 outputTensorInfo.SetQuantizationScale(qScale);
994 outputTensorInfo.SetQuantizationOffset(qOffset);
995 }
996
Aron Virginas-Tar48623a02019-10-22 10:00:28 +0100997 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +0100998
999 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001000 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1001 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +01001002
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001003 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001004 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
surmeh01bceff2f2018-03-29 16:29:27 +01001005}
1006
1007
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001008template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001009LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1010 armnn::IWorkloadFactory& workloadFactory,
1011 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1012 float qScale = 1.0f,
1013 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001014{
1015 armnn::Pooling2dDescriptor descriptor;
1016 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1017 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1018 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1019 descriptor.m_PadLeft = 1;
1020 descriptor.m_PadRight = 1;
1021 descriptor.m_PadTop = 1;
1022 descriptor.m_PadBottom = 1;
1023 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1024
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001025 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1026 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001027
1028 // Set quantization parameters if the requested type is a quantized type.
1029 if(armnn::IsQuantizedType<T>())
1030 {
1031 inputTensorInfo.SetQuantizationScale(qScale);
1032 inputTensorInfo.SetQuantizationOffset(qOffset);
1033 outputTensorInfo.SetQuantizationScale(qScale);
1034 outputTensorInfo.SetQuantizationOffset(qOffset);
1035 }
1036
1037 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001038 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001039 -1.0f, -2.0f, 3.0f, 4.0f,
1040 -1.0f, -2.0f, 3.0f, 4.0f,
1041 1.0f, 2.0f, -3.0f, -4.0f,
1042 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001043 },
1044 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001045
1046 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001047 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001048 -1.0f, 3.0f, 4.0f,
1049 1.0f, 3.0f, 4.0f,
1050 1.0f, 2.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001051 },
1052 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001053
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001054 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001055 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001056}
1057
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001058template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001059LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1060 armnn::IWorkloadFactory& workloadFactory,
1061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1062 float qScale = 1.0f,
1063 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001064{
1065 armnn::Pooling2dDescriptor descriptor;
1066 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1067 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1068 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1069 descriptor.m_PadLeft = 1;
1070 descriptor.m_PadRight = 1;
1071 descriptor.m_PadTop = 1;
1072 descriptor.m_PadBottom = 1;
1073 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1074
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001075 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1076 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001077
1078 // Set quantization parameters if the requested type is a quantized type.
1079 if(armnn::IsQuantizedType<T>())
1080 {
1081 inputTensorInfo.SetQuantizationScale(qScale);
1082 inputTensorInfo.SetQuantizationOffset(qOffset);
1083 outputTensorInfo.SetQuantizationScale(qScale);
1084 outputTensorInfo.SetQuantizationOffset(qOffset);
1085 }
1086
1087 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001088 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001089 -1.0f, -2.0f, 3.0f, 4.0f,
1090 -1.0f, -2.0f, 3.0f, 4.0f,
1091 1.0f, 2.0f, -3.0f, -4.0f,
1092 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001093 },
1094 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001095
1096 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001097 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001098 -1.0f, 3.0f, 4.0f, 4.0f,
1099 2.0f, 3.0f, 4.0f, 4.0f,
1100 2.0f, 3.0f, 4.0f, 4.0f,
1101 2.0f, 2.0f, 2.0f, -3.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001102 },
1103 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001104
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001105 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001106 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001107}
1108
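// IgnorePaddingSimpleAveragePooling2dTestCommon: 2x2 average pooling, stride 2,
// with one pixel of IgnoreValue padding. The implicit zeros contribute to the
// divisor: the top-left window averages { 0, 0, 0, 12 } to the expected 3.0f,
// while the fully in-range centre window averages { 20, 32, 20, 32 } to 26.0f.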
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001109template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001110LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1111 armnn::IWorkloadFactory& workloadFactory,
1112 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1113 float qScale = 1.0f,
1114 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001115{
1116 armnn::Pooling2dDescriptor descriptor;
1117 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1118 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1119 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1120 descriptor.m_PadLeft = 1;
1121 descriptor.m_PadRight = 1;
1122 descriptor.m_PadTop = 1;
1123 descriptor.m_PadBottom = 1;
1124 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1125
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001126 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1127 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001128
1129 // Set quantization parameters if the requested type is a quantized type.
1130 if(armnn::IsQuantizedType<T>())
1131 {
1132 inputTensorInfo.SetQuantizationScale(qScale);
1133 inputTensorInfo.SetQuantizationOffset(qOffset);
1134 outputTensorInfo.SetQuantizationScale(qScale);
1135 outputTensorInfo.SetQuantizationOffset(qOffset);
1136 }
1137
1138 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001139 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001140 12.0f, 20.0f, 32.0f, 40.0f,
1141 12.0f, 20.0f, 32.0f, 40.0f,
1142 12.0f, 20.0f, 32.0f, 40.0f,
1143 12.0f, 20.0f, 32.0f, 40.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001144 },
1145 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001146
1147 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001148 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001149 3.0f, 13.0f, 10.0f,
1150 6.0f, 26.0f, 20.0f,
1151 3.0f, 13.0f, 10.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001152 },
1153 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001154
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001155 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001156 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001157}
1158
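// IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon: 3x3 average pooling,
// stride 2, no padding, with OutputShapeRounding::Ceiling rounding the output
// size up so the 4x4 input still yields a 2x2 output. The second row/column of
// windows only partially overlaps the input; the expected 3.5f values come from
// averaging just the six in-range elements (three 3s and three 4s).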
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001159template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001160LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1161 armnn::IWorkloadFactory& workloadFactory,
1162 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1163 float qScale = 1.0f,
1164 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001165{
1166 armnn::Pooling2dDescriptor descriptor;
1167 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1168 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1169 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1170 descriptor.m_PadLeft = 0;
1171 descriptor.m_PadRight = 0;
1172 descriptor.m_PadTop = 0;
1173 descriptor.m_PadBottom = 0;
1174 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1175 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1176
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001177 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1178 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001179
1180 // Set quantization parameters if the requested type is a quantized type.
1181 if(armnn::IsQuantizedType<T>())
1182 {
1183 inputTensorInfo.SetQuantizationScale(qScale);
1184 inputTensorInfo.SetQuantizationOffset(qOffset);
1185 outputTensorInfo.SetQuantizationScale(qScale);
1186 outputTensorInfo.SetQuantizationOffset(qOffset);
1187 }
1188
1189 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001190 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001191 1.0f, 2.0f, 3.0f, 4.0f,
1192 1.0f, 2.0f, 3.0f, 4.0f,
1193 1.0f, 2.0f, 3.0f, 4.0f,
1194 1.0f, 2.0f, 3.0f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001195 },
1196 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001197
1198 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001199 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001200 2.0f, 3.5f,
1201 2.0f, 3.5f
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001202 },
1203 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001204
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001205 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001206 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001207}
1208
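// IgnorePaddingAveragePooling2dSize3TestCommon: 3x3 average pooling, stride 1,
// with one pixel of IgnoreValue padding, preserving the 4x4 shape. Every window
// is divided by the full pool size of 9, so the corner window that covers only
// four real values still gives (9 + 27 + 18 + 9) / 9 = 7.0f.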
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001209template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001210LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1211 armnn::IWorkloadFactory& workloadFactory,
1212 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1213 float qScale = 1.0f,
1214 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001215{
1216 armnn::Pooling2dDescriptor descriptor;
1217 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1218 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1219 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1220 descriptor.m_PadLeft = 1;
1221 descriptor.m_PadRight = 1;
1222 descriptor.m_PadTop = 1;
1223 descriptor.m_PadBottom = 1;
1224 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1225
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001226 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1227 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001228
1229 // Set quantization parameters if the requested type is a quantized type.
1230 if(armnn::IsQuantizedType<T>())
1231 {
1232 inputTensorInfo.SetQuantizationScale(qScale);
1233 inputTensorInfo.SetQuantizationOffset(qOffset);
1234 outputTensorInfo.SetQuantizationScale(qScale);
1235 outputTensorInfo.SetQuantizationOffset(qOffset);
1236 }
1237
1238 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001239 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001240 9.0f, 27.0f, 18.0f, 36.0f,
1241 18.0f, 9.0f, 18.0f, 9.0f,
1242 27.0f, 18.0f, 9.0f, 27.0f,
1243 9.0f, 27.0f, 9.0f, 18.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001244 },
1245 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001246
1247 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001248 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001249 7.0f, 11.0f, 13.0f, 9.0f,
1250 12.0f, 17.0f, 19.0f, 13.0f,
1251 12.0f, 16.0f, 16.0f, 10.0f,
1252 9.0f, 11.0f, 12.0f, 7.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001253 },
1254 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001255
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001256 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001257 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001258}
1259
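// IgnorePaddingSimpleL2Pooling2dTestCommon: 2x2 L2 pooling (square root of the
// mean of the squared window elements), stride 2, with one pixel of IgnoreValue
// padding. The padded zeros count towards the divisor: the top-left window is
// sqrt((2*2 + 0 + 0 + 0) / 4) = 1.0f and the centre window is
// sqrt((2*2 + 2*2 + 2*2 + 4*4) / 4) = sqrt(7) ≈ 2.6457f.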
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001260template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001261LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1262 armnn::IWorkloadFactory& workloadFactory,
1263 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1264 float qScale = 1.0f,
1265 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001266{
1267 armnn::Pooling2dDescriptor descriptor;
1268 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1269 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1270 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1271 descriptor.m_PadLeft = 1;
1272 descriptor.m_PadRight = 1;
1273 descriptor.m_PadTop = 1;
1274 descriptor.m_PadBottom = 1;
1275 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1276
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001277 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1278 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001279
1280 // Set quantization parameters if the requested type is a quantized type.
1281 if(armnn::IsQuantizedType<T>())
1282 {
1283 inputTensorInfo.SetQuantizationScale(qScale);
1284 inputTensorInfo.SetQuantizationOffset(qOffset);
1285 outputTensorInfo.SetQuantizationScale(qScale);
1286 outputTensorInfo.SetQuantizationOffset(qOffset);
1287 }
1288
1289 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001290 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001291 2.0f, 4.0f, 8.0f, 16.0f,
1292 4.0f, 2.0f, 2.0f, 4.0f,
1293 8.0f, 2.0f, 4.0f, 2.0f,
1294 16.0f, 2.0f, 2.0f, 8.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001295 },
1296 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001297
1298 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001299 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001300 1.0f, 4.4721f, 8.0f,
1301 4.4721f, 2.6457f, 2.236f,
1302 8.0f, 1.4142f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001303 },
1304 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001305
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001306 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001307 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001308}
1309
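// IgnorePaddingL2Pooling2dSize3TestCommon: 3x3 L2 pooling, stride 1, with one
// pixel of IgnoreValue padding, so the output stays 4x4. As above, the divisor
// is always the full pool size of 9; the corner value is
// sqrt((1 + 4 + 1 + 4) / 9) ≈ 1.054, matching the expected 1.0540f.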
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001310template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001311LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1312 armnn::IWorkloadFactory& workloadFactory,
1313 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1314 float qScale = 1.0f,
1315 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001316{
1317 armnn::Pooling2dDescriptor descriptor;
1318 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1319 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1320 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1321 descriptor.m_PadLeft = 1;
1322 descriptor.m_PadRight = 1;
1323 descriptor.m_PadTop = 1;
1324 descriptor.m_PadBottom = 1;
1325 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1326
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001327 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1328 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001329
1330 // Set quantization parameters if the requested type is a quantized type.
1331 if(armnn::IsQuantizedType<T>())
1332 {
1333 inputTensorInfo.SetQuantizationScale(qScale);
1334 inputTensorInfo.SetQuantizationOffset(qOffset);
1335 outputTensorInfo.SetQuantizationScale(qScale);
1336 outputTensorInfo.SetQuantizationOffset(qOffset);
1337 }
1338
1339 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001340 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001341 1.0f, 2.0f, 3.0f, 4.0f,
1342 1.0f, 2.0f, 3.0f, 4.0f,
1343 1.0f, 2.0f, 3.0f, 4.0f,
1344 1.0f, 2.0f, 3.0f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001345 },
1346 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001347
1348 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001349 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001350 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1351 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1352 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1353 1.0540f, 1.7638f, 2.5385f, 2.3570f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001354 },
1355 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001356
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001357 return SimplePooling2dTestImpl<ArmnnType>(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001358 workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001359}
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001360
1361} // anonymous namespace
1362
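// Everything below this point is a non-template entry point that simply
// instantiates one of the helpers above for a concrete data type: Float32,
// QAsymmU8 (usually with an explicit quantization scale and offset) or
// QSymmS16.
//
// A minimal usage sketch, assuming a backend-specific workload factory is
// available (armnn::RefWorkloadFactory and the member names below are for
// illustration only):
//
//     armnn::RefWorkloadFactory factory;
//     auto result = IgnorePaddingSimpleMaxPooling2dTest(factory, nullptr);
//     // Compare result.output against result.outputExpected.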
1363LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
1364 armnn::IWorkloadFactory& workloadFactory,
1365 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1366 bool forceNoPadding)
1367{
1368 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
1369 workloadFactory, memoryManager, forceNoPadding);
1370}
1371
1372LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
1373 armnn::IWorkloadFactory& workloadFactory,
1374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1375 bool forceNoPadding)
1376{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001377 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001378 workloadFactory, memoryManager, forceNoPadding, 3.0f, -5);
1379}
1380
1381LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
1382 armnn::IWorkloadFactory& workloadFactory,
1383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1384 bool forceNoPadding)
1385{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001386 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001387 workloadFactory, memoryManager, forceNoPadding);
1388}
1389
1390LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
1391 armnn::IWorkloadFactory& workloadFactory,
1392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1393 bool forceNoPadding)
1394{
1395 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
1396 workloadFactory, memoryManager, forceNoPadding);
1397}
1398
1399LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
1400 armnn::IWorkloadFactory& workloadFactory,
1401 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1402 bool forceNoPadding)
1403{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001404 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001405 workloadFactory, memoryManager, forceNoPadding, 0.1f, 128);
1406}
1407
1408LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
1409 armnn::IWorkloadFactory& workloadFactory,
1410 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1411 bool forceNoPadding)
1412{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001413 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001414 workloadFactory, memoryManager, forceNoPadding);
1415}
1416
1417LayerTestResult<float, 4> SimpleMaxPooling2dTest(
1418 armnn::IWorkloadFactory& workloadFactory,
1419 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1420 const armnn::DataLayout dataLayout)
1421{
1422 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1423}
1424
1425LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
1426 armnn::IWorkloadFactory& workloadFactory,
1427 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1428 const armnn::DataLayout dataLayout)
1429{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001430 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001431}
1432
1433LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
1434 armnn::IWorkloadFactory& workloadFactory,
1435 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1436 const armnn::DataLayout dataLayout)
1437{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001438 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001439}

                                                       1440LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
1441 armnn::IWorkloadFactory& workloadFactory,
1442 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1443{
1444 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1445}
1446
1447LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
1448 armnn::IWorkloadFactory& workloadFactory,
1449 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1450{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001451 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001452 workloadFactory, memoryManager, 1.0f, -5);
1453}
1454
1455LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
1456 armnn::IWorkloadFactory& workloadFactory,
1457 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1458{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001459 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001460 workloadFactory, memoryManager);
1461}
1462
1463LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
1464 armnn::IWorkloadFactory& workloadFactory,
1465 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1466{
1467 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1468}
1469
1470LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
1471 armnn::IWorkloadFactory& workloadFactory,
1472 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1473{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001474 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001475 workloadFactory, memoryManager, 1.0f, -5);
1476}
1477
1478LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
1479 armnn::IWorkloadFactory& workloadFactory,
1480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1481{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001482 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001483 workloadFactory, memoryManager);
1484}
1485
1486LayerTestResult<float, 4> SimpleAveragePooling2dTest(
1487 armnn::IWorkloadFactory& workloadFactory,
1488 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1489 const armnn::DataLayout dataLayout)
1490{
1491 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1492}
1493
1494LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
1495 armnn::IWorkloadFactory& workloadFactory,
1496 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1497 const armnn::DataLayout dataLayout)
1498{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001499 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001500 workloadFactory, memoryManager, dataLayout, 0.5, -1);
1501}
1502
1503LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
1504 armnn::IWorkloadFactory& workloadFactory,
1505 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1506 const armnn::DataLayout dataLayout)
1507{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001508 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001509 workloadFactory, memoryManager, dataLayout);
1510}
1511
1512LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
1513 armnn::IWorkloadFactory& workloadFactory,
1514 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1515 bool forceNoPadding)
1516{
1517 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
1518 workloadFactory, memoryManager, forceNoPadding);
1519}
1520
1521LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
1522 armnn::IWorkloadFactory& workloadFactory,
1523 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1524{
1525 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1526}
1527
1528LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
1529 armnn::IWorkloadFactory& workloadFactory,
1530 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1531{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001532 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001533 workloadFactory, memoryManager, 0.5, -1);
1534}
1535
1536LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
1537 armnn::IWorkloadFactory& workloadFactory,
1538 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1539{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001540 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001541 workloadFactory, memoryManager);
                                                       1542}

                                                       1543LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
1544 armnn::IWorkloadFactory& workloadFactory,
1545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1546{
1547 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1548}
1549
1550LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
1551 armnn::IWorkloadFactory& workloadFactory,
1552 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1553{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001554 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001555 workloadFactory, memoryManager);
1556}
1557
1558LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
1559 armnn::IWorkloadFactory& workloadFactory,
1560 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1561{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001562 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001563 workloadFactory, memoryManager);
1564}
1565
1566LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
1567 armnn::IWorkloadFactory& workloadFactory,
1568 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1569{
1570 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
1571 workloadFactory, memoryManager);
1572}
1573
1574LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
1575 armnn::IWorkloadFactory& workloadFactory,
1576 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1577{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001578 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001579 workloadFactory, memoryManager);
1580}
1581
1582LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
1583 armnn::IWorkloadFactory& workloadFactory,
1584 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1585{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001586 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001587 workloadFactory, memoryManager);
1588}
1589
1590LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
1591 armnn::IWorkloadFactory& workloadFactory,
1592 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1593{
1594 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1595}
1596
1597LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
1598 armnn::IWorkloadFactory& workloadFactory,
1599 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1600{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001601 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001602 workloadFactory, memoryManager);
1603}
1604
1605LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
1606 armnn::IWorkloadFactory& workloadFactory,
1607 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1608{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001609 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001610 workloadFactory, memoryManager);
1611}
1612
1613LayerTestResult<float, 4> SimpleL2Pooling2dTest(
1614 armnn::IWorkloadFactory& workloadFactory,
1615 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1616 const armnn::DataLayout dataLayout)
1617{
1618 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
1619}
1620
1621LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
1622 armnn::IWorkloadFactory& workloadFactory,
1623 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1624 const armnn::DataLayout dataLayout)
1625{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001626 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001627}
1628
1629LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
1630 armnn::IWorkloadFactory& workloadFactory,
1631 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1632 const armnn::DataLayout dataLayout)
1633{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001634 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001635}
1636
1637LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
1638 armnn::IWorkloadFactory& workloadFactory,
1639 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1640{
1641 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1642}
1643
1644LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
1645 armnn::IWorkloadFactory& workloadFactory,
1646 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1647{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001648 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001649}
1650
1651LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
1652 armnn::IWorkloadFactory& workloadFactory,
1653 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1654{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001655 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001656}
1657
1658LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
1659 armnn::IWorkloadFactory& workloadFactory,
1660 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1661{
1662 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1663}
1664
1665LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
1666 armnn::IWorkloadFactory& workloadFactory,
1667 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1668{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001669 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001670}
1671
1672LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
1673 armnn::IWorkloadFactory& workloadFactory,
1674 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1675{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001676 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001677}

                                                       1678LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
1679 armnn::IWorkloadFactory& workloadFactory,
1680 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1681{
1682 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1683}
1684
1685LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
1686 armnn::IWorkloadFactory& workloadFactory,
1687 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1688{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001689 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001690}
1691
1692LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
1693 armnn::IWorkloadFactory& workloadFactory,
1694 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1695{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001696 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001697}
1698
1699LayerTestResult<float, 4> L2Pooling2dSize7Test(
1700 armnn::IWorkloadFactory& workloadFactory,
1701 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1702{
1703 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1704}
1705
1706LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
1707 armnn::IWorkloadFactory& workloadFactory,
1708 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1709{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001710 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001711}
1712
1713LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
1714 armnn::IWorkloadFactory& workloadFactory,
1715 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1716{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001717 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001718}
1719
1720LayerTestResult<float, 4> L2Pooling2dSize9Test(
1721 armnn::IWorkloadFactory& workloadFactory,
1722 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1723{
1724 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1725}
1726
1727LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
1728 armnn::IWorkloadFactory& workloadFactory,
1729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1730{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001731 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001732}
1733
1734LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
1735 armnn::IWorkloadFactory& workloadFactory,
1736 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1737{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001738 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001739}

                                                       1740LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
1741 armnn::IWorkloadFactory& workloadFactory,
1742 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1743{
1744 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1745}
1746
1747LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
1748 armnn::IWorkloadFactory& workloadFactory,
1749 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1750{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001751 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001752}
1753
1754LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
1755 armnn::IWorkloadFactory& workloadFactory,
1756 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1757{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001758 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001759}
1760
1761LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
1762 armnn::IWorkloadFactory& workloadFactory,
1763 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1764{
1765 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1766}
1767
1768LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
1769 armnn::IWorkloadFactory& workloadFactory,
1770 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1771{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001772 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001773}
1774
1775LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
1776 armnn::IWorkloadFactory& workloadFactory,
1777 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1778{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001779 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001780}
1781
1782LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
1783 armnn::IWorkloadFactory& workloadFactory,
1784 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1785{
1786 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
1787}
1788
1789LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
1790 armnn::IWorkloadFactory& workloadFactory,
1791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1792{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001793 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001794}
1795
1796LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
1797 armnn::IWorkloadFactory& workloadFactory,
1798 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1799{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001800 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001801}
1802
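// The ComparePooling2d* entry points differ from the wrappers above: they take
// a second, reference workload factory together with the pooling algorithm and
// forward to ComparePooling2dTestCommon, which (as the name suggests) is meant
// to run the same pooling configuration on both factories and compare the
// results.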
1803LayerTestResult<float, 4> ComparePooling2dTest(
1804 armnn::IWorkloadFactory& workloadFactory,
1805 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1806 armnn::IWorkloadFactory& refWorkloadFactory,
1807 armnn::PoolingAlgorithm poolingType)
1808{
1809 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
1810 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1811}
1812
1813LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
1814 armnn::IWorkloadFactory& workloadFactory,
1815 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1816 armnn::IWorkloadFactory& refWorkloadFactory,
1817 armnn::PoolingAlgorithm poolingType)
1818{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001819 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001820 workloadFactory, memoryManager, refWorkloadFactory, poolingType, 0.1f, 128);
1821}
1822
1823LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
1824 armnn::IWorkloadFactory& workloadFactory,
1825 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1826 armnn::IWorkloadFactory& refWorkloadFactory,
1827 armnn::PoolingAlgorithm poolingType)
1828{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001829 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001830 workloadFactory, memoryManager, refWorkloadFactory, poolingType);
1831}