//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2dTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/LayerSupport.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>

#include <armnn/utility/IgnoreUnused.hpp>

#include <backendsCommon/WorkloadInfo.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/numeric/conversion/cast.hpp>

namespace
{

using namespace armnnUtils;

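// Shared helper: runs a single Pooling2d workload with the given descriptor and input on the
// supplied backend and returns the actual output alongside the expected output for comparison.
// If the backend does not support the configuration, the result is returned early with
// result.supported == false.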
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    IgnoreUnused(memoryManager);
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();

    unsigned int inputHeight = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
// Pooling size: 3x3
// Stride: (2,4)
// input size: 8x13
// channels: 2
// batch size: 2
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, ARM Compute raises an error if padX or padY is equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
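    // With OutputShapeRounding::Floor this evaluates to:
    //   padded     : outputWidth = (8 + 3 + 3 + 2 - 3) / 2 = 6,  outputHeight = (13 + 0 + 0 + 4 - 3) / 4 = 3
    //   no padding : outputWidth = (8 + 0 + 0 + 2 - 3) / 2 = 3,  outputHeight = (13 + 0 + 0 + 4 - 3) / 4 = 3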
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                8.0f, 8.0f, 8.0f,
                9.0f, 7.0f, 9.0f,
                9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, -3.0f,
                -1.0f, 0.0f, 0.0f,
                -1.0f, -1.0f, -1.0f,

                8.0f, 8.0f, 8.0f,
                9.0f, 7.0f, 9.0f,
                9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, -3.0f,
                -1.0f, 0.0f, 0.0f,
                -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f, -3.0f, -3.0f,
                0.0f, -1.0f, 0.0f, 0.0f, 0.0f, -2.0f,
                0.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,

                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,

                0.0f, 0.0f, 0.0f, 0.0f, -3.0f, -3.0f,
                0.0f, -1.0f, 0.0f, 0.0f, 0.0f, -2.0f,
                0.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             1.0f,  2.0f,  5.0f,  6.0f,
             3.0f,  4.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             4.0f,  8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             2.0f,  2.0f,  6.0f,  6.0f,
             4.0f,  4.0f,  8.0f,  8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             3.0f,  7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f,  0.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

    std::vector<T> outputVec;

    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
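    // With PaddingMethod::Exclude the padded elements are left out of each average, so a constant
    // input of 1.0f averages to 1.0f everywhere, even for windows that overlap the large padding.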

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        },
        qScale, qOffset));
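    // L2 pooling is the square root of the mean of the squared values in each window,
    // e.g. the top-left 2x2 window { 1, 7, 1, 7 } gives sqrt((1 + 49 + 1 + 49) / 4) = 5.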

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));
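    // The single 7x7 window has a sum of squares of 441, so the L2 result is sqrt(441 / 49) = 3.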

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

    // These were calculated manually.
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            0.0f, 3.0f, 0.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

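// Runs the same pooling workload with random input data on the backend under test and on the
// reference workload factory, returning both outputs so the caller can check that they match.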
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    IgnoreUnused(memoryManager);
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
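    // With the values above: outputWidth = (16 + 0 + 2 - 3) / 2 = 7, outputHeight = (32 + 0 + 4 - 3) / 4 = 8.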

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };

    inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);

    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_PoolType = poolingType;
    data.m_Parameters.m_PoolWidth = poolSize;
    data.m_Parameters.m_PoolHeight = poolSize;
    data.m_Parameters.m_StrideX = strideX;
    data.m_Parameters.m_StrideY = strideY;
    data.m_Parameters.m_PadLeft = padX;
    data.m_Parameters.m_PadRight = padX;
    data.m_Parameters.m_PadTop = padY;
    data.m_Parameters.m_PadBottom = padY;
    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                             data.m_Parameters,
                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!comparisonResult.supported)
    {
        return comparisonResult;
    }

    armnn::Pooling2dQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->Execute();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());

    return comparisonResult;
}

//
// Tests max pooling with the following parameters:
//
// Pooling size: 2x2
// Stride: (2,2)
// input size: 4x4
// channels: 1
// batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 4;
    unsigned int inputHeight = 4;

    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
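    // With OutputShapeRounding::Floor this evaluates to:
    //   padded     : outputWidth = (4 + 3 + 3 + 2 - 2) / 2 = 5,  outputHeight = (4 + 0 + 0 + 2 - 2) / 2 = 2
    //   no padding : outputWidth = (4 + 0 + 0 + 2 - 2) / 2 = 2,  outputHeight = (4 + 0 + 0 + 2 - 2) / 2 = 2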
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f,  15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
// Pooling size: 3x2
// Stride: (2,2)
// input size: 3x2
// channels: 1
// batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
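    // With OutputShapeRounding::Floor this evaluates to:
    //   padded     : outputWidth = (3 + 1 + 1 + 2 - 3) / 2 = 2,  outputHeight = (2 + 0 + 0 + 2 - 2) / 2 = 1
    //   no padding : outputWidth = (3 + 0 + 0 + 2 - 3) / 2 = 1,  outputHeight = (2 + 0 + 0 + 2 - 2) / 2 = 1
    // Because PaddingMethod::IgnoreValue counts the zero padding in each average, the padded case
    // expects (0 + 3 + 6 + 0 + 12 + 15) / 6 = 6 and (6 + 9 + 0 + 15 + 18 + 0) / 6 = 8.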
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
         3.0f,  6.0f,  9.0f,
        12.0f, 15.0f, 18.0f,
    };

    std::vector<float> expectedOutputDataWithPadding = {
        6.0f, 8.0f,
    };

    std::vector<float> expectedOutputDataNoPadding = {
        10.5f,
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating average values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001018template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001019LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
1020 armnn::IWorkloadFactory& workloadFactory,
1021 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001022 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001023 float qScale = 1.0f,
1024 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001025{
1026 armnn::Pooling2dDescriptor descriptor;
1027 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1028 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1029 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1030 descriptor.m_PadLeft = 1;
1031 descriptor.m_PadRight = 1;
1032 descriptor.m_PadTop = 1;
1033 descriptor.m_PadBottom = 1;
1034 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1035
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001036 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1037 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001038
1039 // Set quantization parameters if the requested type is a quantized type.
1040 if(armnn::IsQuantizedType<T>())
1041 {
1042 inputTensorInfo.SetQuantizationScale(qScale);
1043 inputTensorInfo.SetQuantizationOffset(qOffset);
1044 outputTensorInfo.SetQuantizationScale(qScale);
1045 outputTensorInfo.SetQuantizationOffset(qOffset);
1046 }
1047
1048 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001049 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001050 -1.0f, -2.0f, 3.0f, 4.0f,
1051 -1.0f, -2.0f, 3.0f, 4.0f,
1052 1.0f, 2.0f, -3.0f, -4.0f,
1053 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001054 },
1055 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001056
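    // The expected values below reflect the padded border being ignored by the max: each 2x2
    // window reduces to the largest real input element it covers, e.g. the top-left window
    // overlaps only -1.0f, and the centre window covers {-2, 3, 2, -3}, giving 3.0f.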
1057 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001058 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001059 -1.0f, 3.0f, 4.0f,
1060 1.0f, 3.0f, 4.0f,
1061 1.0f, 2.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001062 },
1063 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001064
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001065 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001066 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001067}
1068
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001069template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001070LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
1071 armnn::IWorkloadFactory& workloadFactory,
1072 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001073 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001074 float qScale = 1.0f,
1075 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001076{
1077 armnn::Pooling2dDescriptor descriptor;
1078 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
1079 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1080 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1081 descriptor.m_PadLeft = 1;
1082 descriptor.m_PadRight = 1;
1083 descriptor.m_PadTop = 1;
1084 descriptor.m_PadBottom = 1;
1085 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1086
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001087 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1088 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001089
1090 // Set quantization parameters if the requested type is a quantized type.
1091 if(armnn::IsQuantizedType<T>())
1092 {
1093 inputTensorInfo.SetQuantizationScale(qScale);
1094 inputTensorInfo.SetQuantizationOffset(qOffset);
1095 outputTensorInfo.SetQuantizationScale(qScale);
1096 outputTensorInfo.SetQuantizationOffset(qOffset);
1097 }
1098
1099 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001100 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001101 -1.0f, -2.0f, 3.0f, 4.0f,
1102 -1.0f, -2.0f, 3.0f, 4.0f,
1103 1.0f, 2.0f, -3.0f, -4.0f,
1104 1.0f, 2.0f, -3.0f, -4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001105 },
1106 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001107
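    // Expected values again ignore the padded border: the top-left 3x3 window covers only
    // {-1, -2, -1, -2}, so its max is -1.0f, while the bottom-right window covers
    // {-3, -4, -3, -4}, giving -3.0f.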
1108 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001109 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001110 -1.0f, 3.0f, 4.0f, 4.0f,
1111 2.0f, 3.0f, 4.0f, 4.0f,
1112 2.0f, 3.0f, 4.0f, 4.0f,
1113 2.0f, 2.0f, 2.0f, -3.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001114 },
1115 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001116
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001117 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001118 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001119}
1120
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001121template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001122LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
1123 armnn::IWorkloadFactory& workloadFactory,
1124 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001125 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001126 float qScale = 1.0f,
1127 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001128{
1129 armnn::Pooling2dDescriptor descriptor;
1130 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1131 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1132 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1133 descriptor.m_PadLeft = 1;
1134 descriptor.m_PadRight = 1;
1135 descriptor.m_PadTop = 1;
1136 descriptor.m_PadBottom = 1;
1137 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1138
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001139 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1140 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001141
1142 // Set quantization parameters if the requested type is a quantized type.
1143 if(armnn::IsQuantizedType<T>())
1144 {
1145 inputTensorInfo.SetQuantizationScale(qScale);
1146 inputTensorInfo.SetQuantizationOffset(qOffset);
1147 outputTensorInfo.SetQuantizationScale(qScale);
1148 outputTensorInfo.SetQuantizationOffset(qOffset);
1149 }
1150
1151 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001152 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001153 12.0f, 20.0f, 32.0f, 40.0f,
1154 12.0f, 20.0f, 32.0f, 40.0f,
1155 12.0f, 20.0f, 32.0f, 40.0f,
1156 12.0f, 20.0f, 32.0f, 40.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001157 },
1158 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001159
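    // Expected values divide by the full 2x2 window, with the zero padding contributing nothing
    // to the sum: the top-left window overlaps only 12.0f, giving 12 / 4 = 3.0f, and the centre
    // window averages (20 + 32 + 20 + 32) / 4 = 26.0f.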
1160 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001161 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001162 3.0f, 13.0f, 10.0f,
1163 6.0f, 26.0f, 20.0f,
1164 3.0f, 13.0f, 10.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001165 },
1166 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001167
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001168 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001169 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001170}
1171
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001172template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001173LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
1174 armnn::IWorkloadFactory& workloadFactory,
1175 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001176 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001177 float qScale = 1.0f,
1178 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001179{
1180 armnn::Pooling2dDescriptor descriptor;
1181 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1182 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1183 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1184 descriptor.m_PadLeft = 0;
1185 descriptor.m_PadRight = 0;
1186 descriptor.m_PadTop = 0;
1187 descriptor.m_PadBottom = 0;
1188 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1189 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
1190
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001191 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, ArmnnType);
1192 armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001193
1194 // Set quantization parameters if the requested type is a quantized type.
1195 if(armnn::IsQuantizedType<T>())
1196 {
1197 inputTensorInfo.SetQuantizationScale(qScale);
1198 inputTensorInfo.SetQuantizationOffset(qOffset);
1199 outputTensorInfo.SetQuantizationScale(qScale);
1200 outputTensorInfo.SetQuantizationOffset(qOffset);
1201 }
1202
1203 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001204 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001205 1.0f, 2.0f, 3.0f, 4.0f,
1206 1.0f, 2.0f, 3.0f, 4.0f,
1207 1.0f, 2.0f, 3.0f, 4.0f,
1208 1.0f, 2.0f, 3.0f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001209 },
1210 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001211
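    // Ceiling rounding creates a second output column whose 3x3 window runs past the right edge;
    // the expected 3.5f matches averaging only the in-range elements, (3 + 4) * 3 / 6 = 3.5f,
    // while the fully covered window gives (1 + 2 + 3) * 3 / 9 = 2.0f.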
1212 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001213 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001214 2.0f, 3.5f,
1215 2.0f, 3.5f
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001216 },
1217 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001218
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001219 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001220 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001221}
1222
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001223template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001224LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
1225 armnn::IWorkloadFactory& workloadFactory,
1226 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001227 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001228 float qScale = 1.0f,
1229 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001230{
1231 armnn::Pooling2dDescriptor descriptor;
1232 descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
1233 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1234 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1235 descriptor.m_PadLeft = 1;
1236 descriptor.m_PadRight = 1;
1237 descriptor.m_PadTop = 1;
1238 descriptor.m_PadBottom = 1;
1239 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1240
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001241 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1242 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001243
1244 // Set quantization parameters if the requested type is a quantized type.
1245 if(armnn::IsQuantizedType<T>())
1246 {
1247 inputTensorInfo.SetQuantizationScale(qScale);
1248 inputTensorInfo.SetQuantizationOffset(qOffset);
1249 outputTensorInfo.SetQuantizationScale(qScale);
1250 outputTensorInfo.SetQuantizationOffset(qOffset);
1251 }
1252
1253 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001254 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001255 9.0f, 27.0f, 18.0f, 36.0f,
1256 18.0f, 9.0f, 18.0f, 9.0f,
1257 27.0f, 18.0f, 9.0f, 27.0f,
1258 9.0f, 27.0f, 9.0f, 18.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001259 },
1260 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001261
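    // Expected values include the zero padding in each 3x3 divisor: the top-left window overlaps
    // only {9, 27, 18, 9}, giving 63 / 9 = 7.0f, and the fully covered window at (1,1) averages
    // (9 + 27 + 18 + 18 + 9 + 18 + 27 + 18 + 9) / 9 = 17.0f.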
1262 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001263 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001264 7.0f, 11.0f, 13.0f, 9.0f,
1265 12.0f, 17.0f, 19.0f, 13.0f,
1266 12.0f, 16.0f, 16.0f, 10.0f,
1267 9.0f, 11.0f, 12.0f, 7.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001268 },
1269 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001270
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001271 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001272 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001273}
1274
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001275template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001276LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
1277 armnn::IWorkloadFactory& workloadFactory,
1278 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001279 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001280 float qScale = 1.0f,
1281 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001282{
1283 armnn::Pooling2dDescriptor descriptor;
1284 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1285 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
1286 descriptor.m_StrideX = descriptor.m_StrideY = 2;
1287 descriptor.m_PadLeft = 1;
1288 descriptor.m_PadRight = 1;
1289 descriptor.m_PadTop = 1;
1290 descriptor.m_PadBottom = 1;
1291 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1292
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001293 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1294 armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001295
1296 // Set quantization parameters if the requested type is a quantized type.
1297 if(armnn::IsQuantizedType<T>())
1298 {
1299 inputTensorInfo.SetQuantizationScale(qScale);
1300 inputTensorInfo.SetQuantizationOffset(qOffset);
1301 outputTensorInfo.SetQuantizationScale(qScale);
1302 outputTensorInfo.SetQuantizationOffset(qOffset);
1303 }
1304
1305 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001306 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001307 2.0f, 4.0f, 8.0f, 16.0f,
1308 4.0f, 2.0f, 2.0f, 4.0f,
1309 8.0f, 2.0f, 4.0f, 2.0f,
1310 16.0f, 2.0f, 2.0f, 8.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001311 },
1312 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001313
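    // L2 pooling takes the square root of the mean of squares over each window, with the zero
    // padding counted in the divisor: the top-left window gives sqrt(2*2 / 4) = 1.0f and its
    // right-hand neighbour sqrt((4*4 + 8*8) / 4) = sqrt(20) ~= 4.4721f.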
1314 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001315 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001316 1.0f, 4.4721f, 8.0f,
1317 4.4721f, 2.6457f, 2.236f,
1318 8.0f, 1.4142f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001319 },
1320 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001321
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001322 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001323 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001324}
1325
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001326template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001327LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
1328 armnn::IWorkloadFactory& workloadFactory,
1329 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001330 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001331 float qScale = 1.0f,
1332 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +00001333{
1334 armnn::Pooling2dDescriptor descriptor;
1335 descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
1336 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
1337 descriptor.m_StrideX = descriptor.m_StrideY = 1;
1338 descriptor.m_PadLeft = 1;
1339 descriptor.m_PadRight = 1;
1340 descriptor.m_PadTop = 1;
1341 descriptor.m_PadBottom = 1;
1342 descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
1343
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001344 armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
1345 armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +00001346
1347 // Set quantization parameters if the requested type is a quantized type.
1348 if(armnn::IsQuantizedType<T>())
1349 {
1350 inputTensorInfo.SetQuantizationScale(qScale);
1351 inputTensorInfo.SetQuantizationOffset(qOffset);
1352 outputTensorInfo.SetQuantizationScale(qScale);
1353 outputTensorInfo.SetQuantizationOffset(qOffset);
1354 }
1355
1356 auto input = MakeTensor<T, 4>(inputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001357 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001358 1.0f, 2.0f, 3.0f, 4.0f,
1359 1.0f, 2.0f, 3.0f, 4.0f,
1360 1.0f, 2.0f, 3.0f, 4.0f,
1361 1.0f, 2.0f, 3.0f, 4.0f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001362 },
1363 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001364
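    // Same L2 formula with 3x3 windows: the top-left corner covers {1, 2, 1, 2}, giving
    // sqrt((1 + 4 + 1 + 4) / 9) ~= 1.054f, and a fully covered window such as (1,1) gives
    // sqrt((1 + 4 + 9) * 3 / 9) ~= 2.1602f.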
1365 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001366 QuantizedVector<T>({
telsoa014fcda012018-03-09 14:13:49 +00001367 1.0540f, 1.7638f, 2.5385f, 2.3570f,
1368 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1369 1.2909f, 2.1602f, 3.1091f, 2.8867f,
1370 1.0540f, 1.7638f, 2.5385f, 2.3570f,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001371 },
1372 qScale, qOffset));
telsoa014fcda012018-03-09 14:13:49 +00001373
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001374 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001375 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
telsoa014fcda012018-03-09 14:13:49 +00001376}
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001377
1378} // anonymous namespace
1379
1380LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
1381 armnn::IWorkloadFactory& workloadFactory,
1382 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001383 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001384 bool forceNoPadding)
1385{
1386 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
Finn Williams826a5432020-08-27 16:15:20 +01001387 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001388}
1389
1390LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
1391 armnn::IWorkloadFactory& workloadFactory,
1392 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001393 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001394 bool forceNoPadding)
1395{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001396 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001397 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001398}
1399
1400LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
1401 armnn::IWorkloadFactory& workloadFactory,
1402 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001403 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001404 bool forceNoPadding)
1405{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001406 return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001407 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001408}
1409
1410LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
1411 armnn::IWorkloadFactory& workloadFactory,
1412 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001413 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001414 bool forceNoPadding)
1415{
1416 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
Finn Williams826a5432020-08-27 16:15:20 +01001417 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001418}
1419
1420LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
1421 armnn::IWorkloadFactory& workloadFactory,
1422 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001423 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001424 bool forceNoPadding)
1425{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001426 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001427 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001428}
1429
1430LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
1431 armnn::IWorkloadFactory& workloadFactory,
1432 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001433 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001434 bool forceNoPadding)
1435{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001436 return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001437 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001438}
1439
1440LayerTestResult<float, 4> SimpleMaxPooling2dTest(
1441 armnn::IWorkloadFactory& workloadFactory,
1442 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001443 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001444 const armnn::DataLayout dataLayout)
1445{
Finn Williams826a5432020-08-27 16:15:20 +01001446 return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1447 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001448}
1449
1450LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
1451 armnn::IWorkloadFactory& workloadFactory,
1452 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001453 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001454 const armnn::DataLayout dataLayout)
1455{
Finn Williams826a5432020-08-27 16:15:20 +01001456 return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
1457 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001458}
1459
1460LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
1461 armnn::IWorkloadFactory& workloadFactory,
1462 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001463 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001464 const armnn::DataLayout dataLayout)
1465{
Finn Williams826a5432020-08-27 16:15:20 +01001466 return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
1467 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001468}

1469LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
1470 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001471 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1472 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001473{
Finn Williams826a5432020-08-27 16:15:20 +01001474 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
1475 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001476}
1477
1478LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
1479 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001480 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1481 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001482{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001483 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001484 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001485}
1486
1487LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
1488 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001489 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1490 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001491{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001492 return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001493 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001494}
1495
1496LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
1497 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1499 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001500{
Finn Williams826a5432020-08-27 16:15:20 +01001501 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
1502 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001503}
1504
1505LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
1506 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001507 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1508 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001509{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001510 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001511 workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001512}
1513
1514LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
1515 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001516 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1517 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001518{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001519 return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001520 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001521}
1522
1523LayerTestResult<float, 4> SimpleAveragePooling2dTest(
1524 armnn::IWorkloadFactory& workloadFactory,
1525 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001526 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001527 const armnn::DataLayout dataLayout)
1528{
Finn Williams826a5432020-08-27 16:15:20 +01001529 return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1530 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001531}
1532
1533LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
1534 armnn::IWorkloadFactory& workloadFactory,
1535 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001536 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001537 const armnn::DataLayout dataLayout)
1538{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001539 return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001540 workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001541}
1542
1543LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
1544 armnn::IWorkloadFactory& workloadFactory,
1545 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001546 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001547 const armnn::DataLayout dataLayout)
1548{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001549 return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001550 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001551}
1552
1553LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
1554 armnn::IWorkloadFactory& workloadFactory,
1555 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001556 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001557 bool forceNoPadding)
1558{
1559 return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
Finn Williams826a5432020-08-27 16:15:20 +01001560 workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001561}
1562
1563LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
1564 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001565 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1566 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001567{
Finn Williams826a5432020-08-27 16:15:20 +01001568 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
1569 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001570}
1571
1572LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
1573 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001574 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1575 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001576{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001577 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001578 workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001579}
1580
1581LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
1582 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001583 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1584 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001585{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001586 return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001587 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001588}

1589LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
1590 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1592 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001593{
Finn Williams826a5432020-08-27 16:15:20 +01001594 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
1595 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001596}
1597
1598LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
1599 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001600 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1601 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001602{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001603 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001604 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001605}
1606
1607LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
1608 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001609 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1610 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001611{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001612 return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001613 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001614}
1615
1616LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
1617 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001618 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1619 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001620{
1621 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
Finn Williams826a5432020-08-27 16:15:20 +01001622 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001623}
1624
1625LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
1626 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001627 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1628 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001629{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001630 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001631 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001632}
1633
1634LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
1635 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001636 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1637 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001638{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001639 return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001640 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001641}
1642
1643LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
1644 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001645 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1646 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001647{
Finn Williams826a5432020-08-27 16:15:20 +01001648 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
1649 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001650}
1651
1652LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
1653 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001654 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1655 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001656{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001657 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001658 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001659}
1660
1661LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
1662 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001663 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1664 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001665{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001666 return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001667 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001668}
1669
1670LayerTestResult<float, 4> SimpleL2Pooling2dTest(
1671 armnn::IWorkloadFactory& workloadFactory,
1672 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001673 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001674 const armnn::DataLayout dataLayout)
1675{
Finn Williams826a5432020-08-27 16:15:20 +01001676 return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1677 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001678}
1679
1680LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
1681 armnn::IWorkloadFactory& workloadFactory,
1682 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001683 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001684 const armnn::DataLayout dataLayout)
1685{
Finn Williams826a5432020-08-27 16:15:20 +01001686 return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1687 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001688}
1689
1690LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
1691 armnn::IWorkloadFactory& workloadFactory,
1692 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +01001693 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001694 const armnn::DataLayout dataLayout)
1695{
Finn Williams826a5432020-08-27 16:15:20 +01001696 return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1697 workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001698}
1699
1700LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
1701 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001702 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1703 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001704{
Finn Williams826a5432020-08-27 16:15:20 +01001705 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
1706 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001707}
1708
1709LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
1710 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001711 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1712 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001713{
Finn Williams826a5432020-08-27 16:15:20 +01001714 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
1715 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001716}
1717
1718LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
1719 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001720 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1721 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001722{
Finn Williams826a5432020-08-27 16:15:20 +01001723 return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
1724 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001725}
1726
1727LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
1728 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001729 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1730 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001731{
Finn Williams826a5432020-08-27 16:15:20 +01001732 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
1733 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001734}
1735
1736LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
1737 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001738 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1739 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001740{
Finn Williams826a5432020-08-27 16:15:20 +01001741 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
1742 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001743}
1744
1745LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
1746 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001747 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1748 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001749{
Finn Williams826a5432020-08-27 16:15:20 +01001750 return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
1751 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001752}

1753LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
1754 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001755 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1756 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001757{
Finn Williams826a5432020-08-27 16:15:20 +01001758 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
1759 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001760}
1761
1762LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
1763 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001764 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1765 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001766{
Finn Williams826a5432020-08-27 16:15:20 +01001767 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
1768 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001769}
1770
1771LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
1772 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001773 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1774 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001775{
Finn Williams826a5432020-08-27 16:15:20 +01001776 return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
1777 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001778}
1779
1780LayerTestResult<float, 4> L2Pooling2dSize7Test(
1781 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001782 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1783 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001784{
Finn Williams826a5432020-08-27 16:15:20 +01001785 return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
1786 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001787}
1788
1789LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
1790 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001791 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1792 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001793{
Finn Williams826a5432020-08-27 16:15:20 +01001794 return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
1795 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001796}
1797
1798LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
1799 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001800 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1801 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001802{
Finn Williams826a5432020-08-27 16:15:20 +01001803 return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
1804 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001805}
1806
1807LayerTestResult<float, 4> L2Pooling2dSize9Test(
1808 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001809 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1810 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001811{
Finn Williams826a5432020-08-27 16:15:20 +01001812 return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
1813 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001814}
1815
1816LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
1817 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001818 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1819 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001820{
Finn Williams826a5432020-08-27 16:15:20 +01001821 return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
1822 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001823}
1824
1825LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
1826 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001827 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1828 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001829{
Finn Williams826a5432020-08-27 16:15:20 +01001830 return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
1831 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001832}

1833LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
1834 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001835 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1836 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001837{
Finn Williams826a5432020-08-27 16:15:20 +01001838 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
1839 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001840}
1841
1842LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
1843 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001844 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1845 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001846{
Finn Williams826a5432020-08-27 16:15:20 +01001847 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
1848 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001849}
1850
1851LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
1852 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001853 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1854 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001855{
Finn Williams826a5432020-08-27 16:15:20 +01001856 return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
1857 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001858}
1859
1860LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
1861 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001862 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1863 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001864{
Finn Williams826a5432020-08-27 16:15:20 +01001865 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
1866 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001867}
1868
1869LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
1870 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001871 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1872 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001873{
Finn Williams826a5432020-08-27 16:15:20 +01001874 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
1875 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001876}
1877
1878LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
1879 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001880 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1881 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001882{
Finn Williams826a5432020-08-27 16:15:20 +01001883 return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
1884 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001885}
1886
1887LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
1888 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001889 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1890 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001891{
Finn Williams826a5432020-08-27 16:15:20 +01001892 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
1893 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001894}
1895
1896LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
1897 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001898 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1899 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001900{
Finn Williams826a5432020-08-27 16:15:20 +01001901 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
1902 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001903}
1904
1905LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
1906 armnn::IWorkloadFactory& workloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001907 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1908 const armnn::ITensorHandleFactory& tensorHandleFactory)
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001909{
Finn Williams826a5432020-08-27 16:15:20 +01001910 return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
1911 workloadFactory, memoryManager, tensorHandleFactory);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001912}
1913
1914LayerTestResult<float, 4> ComparePooling2dTest(
1915 armnn::IWorkloadFactory& workloadFactory,
1916 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1917 armnn::IWorkloadFactory& refWorkloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001918 const armnn::ITensorHandleFactory& tensorHandleFactory,
1919 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001920 armnn::PoolingAlgorithm poolingType)
1921{
1922 return ComparePooling2dTestCommon<armnn::DataType::Float32>(
Finn Williams826a5432020-08-27 16:15:20 +01001923 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001924}
1925
1926LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
1927 armnn::IWorkloadFactory& workloadFactory,
1928 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1929 armnn::IWorkloadFactory& refWorkloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001930 const armnn::ITensorHandleFactory& tensorHandleFactory,
1931 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001932 armnn::PoolingAlgorithm poolingType)
1933{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001934 return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
Finn Williams826a5432020-08-27 16:15:20 +01001935 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
1936 poolingType, 0.1f, 128);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001937}
1938
1939LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
1940 armnn::IWorkloadFactory& workloadFactory,
1941 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1942 armnn::IWorkloadFactory& refWorkloadFactory,
Finn Williams826a5432020-08-27 16:15:20 +01001943 const armnn::ITensorHandleFactory& tensorHandleFactory,
1944 const armnn::ITensorHandleFactory& refTensorHandleFactory,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001945 armnn::PoolingAlgorithm poolingType)
1946{
Derek Lambertif90c56d2020-01-10 17:14:08 +00001947 return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
Finn Williams826a5432020-08-27 16:15:20 +01001948 workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001949}