//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2dTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/LayerSupport.hpp>

#include <armnnUtils/TensorUtils.hpp>
#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnnUtils/Permute.hpp>

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <backendsCommon/WorkloadInfo.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimplePooling2dTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::Pooling2dDescriptor descriptor,
    float qScale,
    int32_t qOffset,
    const boost::multi_array<T, 4>& input,
    const boost::multi_array<T, 4>& outputExpected)
{
    IgnoreUnused(memoryManager);
    const armnn::DataLayout dataLayout = descriptor.m_DataLayout;
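    // DataLayoutIndexed resolves the height/width/channels axis positions for the requested
    // layout, so the same test body handles both NCHW and NHWC tensors.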
    const armnnUtils::DataLayoutIndexed dimensionIndices = dataLayout;
    auto heightIndex = dimensionIndices.GetHeightIndex();
    auto widthIndex = dimensionIndices.GetWidthIndex();
    auto channelsIndex = dimensionIndices.GetChannelsIndex();

    unsigned int inputHeight     = armnn::numeric_cast<unsigned int>(input.shape()[heightIndex]);
    unsigned int inputWidth      = armnn::numeric_cast<unsigned int>(input.shape()[widthIndex]);
    unsigned int inputChannels   = armnn::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
    unsigned int inputBatchSize  = armnn::numeric_cast<unsigned int>(input.shape()[0]);

    unsigned int outputHeight    = armnn::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
    unsigned int outputWidth     = armnn::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
    unsigned int outputChannels  = armnn::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
    unsigned int outputBatchSize = armnn::numeric_cast<unsigned int>(outputExpected.shape()[0]);

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(
        inputBatchSize, inputChannels, inputHeight, inputWidth, dataLayout, ArmnnType);

    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(
        outputBatchSize, outputChannels, outputHeight, outputWidth, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T, 4> result(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::Pooling2dQueueDescriptor queueDescriptor;
    queueDescriptor.m_Parameters = descriptor;
    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;

    armnn::WorkloadInfo workloadInfo;
    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());

    // Don't execute if Pooling is not supported, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
                                                   queueDescriptor.m_Parameters,
                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    if (!result.supported)
    {
        return result;
    }

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    result.outputExpected = outputExpected;

    return result;
}

//
// Tests max pooling with the following parameters:
//
//   Pooling size: 3x3
//   Stride: (2,4)
//   input size: 8x13
//   channels: 2
//   batch size: 2
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 4;
    // forceNoPadding is mainly used for compatibility with ARM Compute.
    // As of 16/05/2017, it errors if padX or padY are equal to or greater than the pool size.
    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    unsigned int inputWidth = 8;
    unsigned int inputHeight = 13;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
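    // With floor rounding this gives a 6x3 output when padding is applied
    // ((8 + 3 + 3 + 2 - 3) / 2 = 6, (13 + 4 - 3) / 4 = 3) and a 3x3 output with
    // forceNoPadding ((8 + 2 - 3) / 2 = 3).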
    unsigned int channels = 2;
    unsigned int batchSize = 2;

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<float> singleChannelData({
        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
    });

    // Constructs input data.
    std::vector<float> inputData;
    auto negator = [](float f) { return -f; };

    // First image (two channels where the second channel is the negative of the first one).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    // Second image (same as first image).
    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    // These were calculated manually.
    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
    boost::multi_array<T, 4> outputExpected(shape);
    if (forceNoPadding)
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f,

                 8.0f,  8.0f,  8.0f,
                 9.0f,  7.0f,  9.0f,
                 9.0f,  9.0f,  9.0f,

                 0.0f,  0.0f, -3.0f,
                -1.0f,  0.0f,  0.0f,
                -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }
    else
    {
        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
            QuantizedVector<T>({
                0.0f,  8.0f,  8.0f,  8.0f,  8.0f,  8.0f,
                0.0f,  9.0f,  7.0f,  9.0f,  9.0f,  3.0f,
                0.0f,  8.0f,  9.0f,  9.0f,  9.0f,  9.0f,

                0.0f,  0.0f,  0.0f,  0.0f, -3.0f, -3.0f,
                0.0f, -1.0f,  0.0f,  0.0f,  0.0f, -2.0f,
                0.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f,

                0.0f,  8.0f,  8.0f,  8.0f,  8.0f,  8.0f,
                0.0f,  9.0f,  7.0f,  9.0f,  9.0f,  3.0f,
                0.0f,  8.0f,  9.0f,  9.0f,  9.0f,  9.0f,

                0.0f,  0.0f,  0.0f,  0.0f, -3.0f, -3.0f,
                0.0f, -1.0f,  0.0f,  0.0f,  0.0f, -2.0f,
                0.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f
            },
            qScale, qOffset));
    }

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             1.0f,  2.0f,  5.0f,  6.0f,
             3.0f,  4.0f,  7.0f,  8.0f,
             9.0f, 10.0f, 13.0f, 14.0f,
            11.0f, 12.0f, 15.0f, 16.0f,

            17.0f, 18.0f, 21.0f, 22.0f,
            19.0f, 20.0f, 23.0f, 24.0f,
            25.0f, 26.0f, 29.0f, 30.0f,
            27.0f, 28.0f, 31.0f, 32.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             4.0f,  8.0f,
            12.0f, 16.0f,

            20.0f, 24.0f,
            28.0f, 32.0f,
        },
        qScale, qOffset));

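    // The reference data above is written in NCHW order; when the test is run with the NHWC
    // layout, both the input and the expected output are permuted into that layout first.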
    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputData(
        QuantizedVector<T>({
             2.0f,  2.0f,  6.0f,  6.0f,
             4.0f,  4.0f,  8.0f,  8.0f,
            10.0f, 12.0f, 14.0f, 16.0f,
            10.0f, 12.0f, 16.0f, 14.0f,

            18.0f, 20.0f, 24.0f, 22.0f,
            20.0f, 18.0f, 22.0f, 24.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
            26.0f, 28.0f,  0.0f,  0.0f,
        },
        qScale, qOffset));

    std::vector<T> outputData(
        QuantizedVector<T>({
             3.0f,  7.0f,
            11.0f, 15.0f,

            19.0f, 23.0f,
            27.0f,  0.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
    descriptor.m_StrideX = descriptor.m_StrideY = 5;
    descriptor.m_PadLeft = 50;
    descriptor.m_PadRight = 50;
    descriptor.m_PadTop = 50;
    descriptor.m_PadBottom = 50;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    std::vector<T> inputVec;

    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        inputVec.push_back(1);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);

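    // Every pooling window averages a patch of ones, and PaddingMethod::Exclude keeps the
    // padded border out of the divisor, so the expected output is simply all ones as well.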
    std::vector<T> outputVec;

    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
    {
        outputVec.push_back(1);
    }

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    armnn::DataLayout dataLayout = armnn::DataLayout::NCHW,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
    descriptor.m_DataLayout = dataLayout;

    armnn::TensorInfo inputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
    armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);

    std::vector<T> inputData(
        QuantizedVector<T>({
            1.0f, 7.0f, 5.0f, 5.0f,
            1.0f, 7.0f, 5.0f, 5.0f,
            3.0f, 3.0f, 1.0f, 1.0f,
            3.0f, 3.0f, 1.0f, 1.0f,

            1.0f, 7.0f, 0.0f, 0.0f,
            1.0f, 7.0f, 2.0f, 0.0f,
            0.0f, 2.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 1.0f, 1.0f,
        },
        qScale, qOffset));

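    // L2 pooling takes the square root of the mean of the squared values in each window;
    // for the first 2x2 window {1, 7, 1, 7} that is sqrt((1 + 49 + 1 + 49) / 4) = 5.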
    std::vector<T> outputData(
        QuantizedVector<T>({
            5.0f, 5.0f,
            3.0f, 1.0f,

            5.0f, 1.0f,
            1.0f, 1.0f,
        },
        qScale, qOffset));

    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
    if (dataLayout == armnn::DataLayout::NHWC)
    {
        std::vector<T> tmp(inputData.size());
        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(T));
        inputData = tmp;

        std::vector<T> tmp1(outputData.size());
        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(T));
        outputData = tmp1;
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f,
            1.0f, 2.0f, 2.0f, 1.0f,
            5.0f, 4.0f, 1.0f, 5.0f,
            2.0f, 1.0f, 5.0f, 2.0f,
        },
        qScale, qOffset));

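    // The squared values in every 3x3 window sum to 81, so each L2-pooled output is
    // sqrt(81 / 9) = 3.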
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 3;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
            3.0f, 3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 4;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 3.0f,
            3.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
    descriptor.m_StrideX = descriptor.m_StrideY = 7;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 0.0f, 2.0f, 0.0f, 3.0f, 0.0f, 4.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 5.0f, 0.0f, 6.0f, 0.0f, 7.0f, 0.0f,
            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
            0.0f, 5.0f, 0.0f, 2.0f, 0.0f, 1.0f, 1.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
    descriptor.m_StrideX = descriptor.m_StrideY = 9;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
        },
        qScale, qOffset));

    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = 2;
    descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 2;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 2;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;

    // Construct input data.
    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

    // These were calculated manually.
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            0.0f, 3.0f, 0.0f, 3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ComparePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::PoolingAlgorithm poolingType,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
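    // Builds the same pooling workload on the backend under test and on refWorkloadFactory,
    // feeds both the same random input, and returns both outputs so the caller can compare them.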
    IgnoreUnused(memoryManager);
    const unsigned int inputWidth = 16;
    const unsigned int inputHeight = 32;
    const unsigned int channelCount = 2;
    const unsigned int batchSize = 5;

    const unsigned int poolSize = 3;
    const unsigned int strideX = 2;
    const unsigned int strideY = 4;
    const unsigned int padX = 0;
    const unsigned int padY = 0;

    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
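    // With these values the pooled output is 7x8: (16 + 2 - 3) / 2 = 7 and (32 + 4 - 3) / 4 = 8.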
772
773 armnn::TensorInfo inputTensorInfo;
774 armnn::TensorInfo outputTensorInfo;
775
776 unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
777 unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
778
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000779 inputTensorInfo = armnn::TensorInfo(4, inputShape, ArmnnType);
780 outputTensorInfo = armnn::TensorInfo(4, outputShape, ArmnnType);
telsoa014fcda012018-03-09 14:13:49 +0000781
782 // Set quantization parameters if the requested type is a quantized type.
783 if(armnn::IsQuantizedType<T>())
784 {
785 inputTensorInfo.SetQuantizationScale(qScale);
786 inputTensorInfo.SetQuantizationOffset(qOffset);
787 outputTensorInfo.SetQuantizationScale(qScale);
788 outputTensorInfo.SetQuantizationOffset(qOffset);
789 }
790
791 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
792
793 LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
794
Finn Williams826a5432020-08-27 16:15:20 +0100795 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
796 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +0000797
798 armnn::Pooling2dQueueDescriptor data;
799 armnn::WorkloadInfo info;
800 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
801 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
802 data.m_Parameters.m_PoolType = poolingType;
803 data.m_Parameters.m_PoolWidth = poolSize;
804 data.m_Parameters.m_PoolHeight = poolSize;
805 data.m_Parameters.m_StrideX = strideX;
806 data.m_Parameters.m_StrideY = strideY;
807 data.m_Parameters.m_PadLeft = padX;
808 data.m_Parameters.m_PadRight = padX;
809 data.m_Parameters.m_PadTop = padY;
810 data.m_Parameters.m_PadBottom = padY;
811 data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
812
Finn Williams826a5432020-08-27 16:15:20 +0100813 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refTensorHandleFactory.CreateTensorHandle(outputTensorInfo);
814 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refTensorHandleFactory.CreateTensorHandle(inputTensorInfo);
telsoa014fcda012018-03-09 14:13:49 +0000815
816 // Don't execute if Pooling is not supported, as an exception will be raised.
David Beck79141b92018-10-23 16:09:36 +0100817 armnn::BackendId backend = workloadFactory.GetBackendId();
telsoa014fcda012018-03-09 14:13:49 +0000818 const size_t reasonIfUnsupportedMaxLen = 255;
819 char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
David Beck79141b92018-10-23 16:09:36 +0100820 comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
telsoa014fcda012018-03-09 14:13:49 +0000821 data.m_Parameters,
822 reasonIfUnsupported, reasonIfUnsupportedMaxLen);
823 if (!comparisonResult.supported)
824 {
825 return comparisonResult;
826 }
827
828 armnn::Pooling2dQueueDescriptor refData = data;
829 armnn::WorkloadInfo refInfo = info;
830 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
831 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
832
833 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
834 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
835
836 outputHandleRef->Allocate();
837 inputHandleRef->Allocate();
838 inputHandle->Allocate();
839 outputHandle->Allocate();
840
841 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
842 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
843
844 workload->Execute();
845 workloadRef->Execute();
846
847 CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
848 CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
849
850 return comparisonResult;
851}
852
853//
854// Tests max pooling with the following parameters:
855//
856// Pooling size: 2x2
857// Stride: (2,2)
858// input size: 4x4
859// channels: 1
860// batch size: 1
861//
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000862template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000863LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(
864 armnn::IWorkloadFactory& workloadFactory,
865 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Finn Williams826a5432020-08-27 16:15:20 +0100866 const armnn::ITensorHandleFactory& tensorHandleFactory,
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000867 bool forceNoPadding,
868 float qScale = 1.0f,
869 int32_t qOffset = 0)
telsoa014fcda012018-03-09 14:13:49 +0000870{
871 armnn::Pooling2dDescriptor descriptor;
872 descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
873 descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
874 descriptor.m_StrideX = 2;
875 descriptor.m_StrideY = 2;
876 descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
877 descriptor.m_PadTop = descriptor.m_PadBottom = 0;
878 descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
879 descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
880
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000881
telsoa014fcda012018-03-09 14:13:49 +0000882 unsigned int inputWidth = 4;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000883
telsoa014fcda012018-03-09 14:13:49 +0000884 unsigned int inputHeight = 4;
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000885
telsoa014fcda012018-03-09 14:13:49 +0000886 unsigned int outputWidth =
887 (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
888 descriptor.m_StrideX;
889 unsigned int outputHeight =
890 (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
891 descriptor.m_StrideY;
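    // Floor rounding gives a 5x2 output when the 3-element left/right padding is applied
    // ((4 + 3 + 3 + 2 - 2) / 2 = 5) and a 2x2 output with forceNoPadding ((4 + 2 - 2) / 2 = 2).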
    unsigned int channels = 1;
    unsigned int batchSize = 1;

    std::vector<float> inputData = {
        510.0f, 222.0f, 780.0f, 654.0f,
        141.0f, 276.0f,  15.0f, 546.0f,
        303.0f, 618.0f, 582.0f, 339.0f,
        438.0f, 564.0f, 573.0f, 402.0f
    };

    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
    std::vector<float> expectedOutputDataWithPadding = {
        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
    };

    std::vector<float> expectedOutputDataNoPadding = {
        510.0f, 780.0f,
        618.0f, 582.0f
    };

    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);

    // Scale and offset should match input - we're just calculating maximum values.
    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));

    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

//
// Tests average pooling with the following parameters:
//
//   Pooling size: 3x2
//   Stride: (2,2)
//   input size: 3x2
//   channels: 1
//   batch size: 1
//
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = 3;
    descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = 2;
    descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
    descriptor.m_PadRight = descriptor.m_PadLeft;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    unsigned int inputWidth = 3;
    unsigned int inputHeight = 2;
    unsigned int outputWidth =
        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
        descriptor.m_StrideX;
    unsigned int outputHeight =
        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
        descriptor.m_StrideY;
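    // With the 1-element left/right padding this gives a 2x1 output ((3 + 1 + 1 + 2 - 3) / 2 = 2);
    // with forceNoPadding it gives a 1x1 output ((3 + 2 - 3) / 2 = 1).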
976 unsigned int channels = 1;
977 unsigned int batchSize = 1;
978
979 std::vector<float> inputData = {
980 3.0f, 6.0f, 9.0f,
981 12.0f, 15.0f, 18.0f,
982 };
983
984 std::vector<float> expectedOutputDataWithPadding = {
985 6.0f, 8.0f,
986 };
987
988 std::vector<float> expectedOutputDataNoPadding = {
989 10.5f,
990 };
991
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000992 armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100993
994 // Scale and offset should match input - we're just calculating average values.
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +0000995 armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, ArmnnType);
surmeh01bceff2f2018-03-29 16:29:27 +0100996
997 // Set quantization parameters if the requested type is a quantized type.
998 if(armnn::IsQuantizedType<T>())
999 {
1000 inputTensorInfo.SetQuantizationScale(qScale);
1001 inputTensorInfo.SetQuantizationOffset(qOffset);
1002 outputTensorInfo.SetQuantizationScale(qScale);
1003 outputTensorInfo.SetQuantizationOffset(qOffset);
1004 }
1005
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001006 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +01001007
1008 auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001009 forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
1010 QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
surmeh01bceff2f2018-03-29 16:29:27 +01001011
Nattapat Chaimanowong649dd952019-01-22 16:10:44 +00001012 return SimplePooling2dTestImpl<ArmnnType>(
Finn Williams826a5432020-08-27 16:15:20 +01001013 workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
surmeh01bceff2f2018-03-29 16:29:27 +01001014}
1015
1016
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            -1.0f, -2.0f, 3.0f, 4.0f,
            -1.0f, -2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
        },
        qScale, qOffset));

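    // With PaddingMethod::IgnoreValue the padded border positions are not read as zeros for max
    // pooling: the top-left output stays at -1.0f (the only real element under that window) rather
    // than being pulled up to 0.0f.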
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            -1.0f, 3.0f, 4.0f,
            1.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -4.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            -1.0f, -2.0f, 3.0f, 4.0f,
            -1.0f, -2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
            1.0f, 2.0f, -3.0f, -4.0f,
        },
        qScale, qOffset));

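    // Each 3x3 window is effectively clamped to the real tensor: the padding is ignored rather than
    // treated as zero, which is why the top-left output is -1.0f (max of {-1, -2, -1, -2}) and not 0.0f.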
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            -1.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 3.0f, 4.0f, 4.0f,
            2.0f, 2.0f, 2.0f, -3.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
            12.0f, 20.0f, 32.0f, 40.0f,
        },
        qScale, qOffset));

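    // IgnoreValue padding for average pooling: padded positions add nothing to the sum but still
    // count towards the divisor, e.g. the top-left output is 12 / 4 = 3.0f and the centre output is
    // (20 + 32 + 20 + 32) / 4 = 26.0f.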
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            3.0f, 13.0f, 10.0f,
            6.0f, 26.0f, 20.0f,
            3.0f, 13.0f, 10.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 0;
    descriptor.m_PadRight = 0;
    descriptor.m_PadTop = 0;
    descriptor.m_PadBottom = 0;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

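    // Ceiling output-shape rounding yields a 2x2 output from a 4x4 input with 3x3 pooling and stride 2.
    // 2.0f is the mean of a full 3x3 window ((1 + 2 + 3) * 3 / 9); 3.5f shows that the overhang implied
    // by the ceiling rounding is not counted in the divisor ((3 + 4) * 3 / 6).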
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            2.0f, 3.5f,
            2.0f, 3.5f
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            9.0f, 27.0f, 18.0f, 36.0f,
            18.0f, 9.0f, 18.0f, 9.0f,
            27.0f, 18.0f, 9.0f, 27.0f,
            9.0f, 27.0f, 9.0f, 18.0f,
        },
        qScale, qOffset));

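    // Every output, including the border ones, is divided by the full 3x3 window size: e.g. the
    // top-left value is (9 + 27 + 18 + 9) / 9 = 7.0f even though only four window positions are in range.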
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            7.0f, 11.0f, 13.0f, 9.0f,
            12.0f, 17.0f, 19.0f, 13.0f,
            12.0f, 16.0f, 16.0f, 10.0f,
            9.0f, 11.0f, 12.0f, 7.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
    descriptor.m_StrideX = descriptor.m_StrideY = 2;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            2.0f, 4.0f, 8.0f, 16.0f,
            4.0f, 2.0f, 2.0f, 4.0f,
            8.0f, 2.0f, 4.0f, 2.0f,
            16.0f, 2.0f, 2.0f, 8.0f,
        },
        qScale, qOffset));

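    // L2 pooling with IgnoreValue padding: only in-range elements contribute to the sum of squares,
    // but the divisor is the full window size, e.g. the top-left output is sqrt(2^2 / 4) = 1.0f.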
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            1.0f, 4.4721f, 8.0f,
            4.4721f, 2.6457f, 2.236f,
            8.0f, 1.4142f, 4.0f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale = 1.0f,
    int32_t qOffset = 0)
{
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
    descriptor.m_StrideX = descriptor.m_StrideY = 1;
    descriptor.m_PadLeft = 1;
    descriptor.m_PadRight = 1;
    descriptor.m_PadTop = 1;
    descriptor.m_PadBottom = 1;
    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;

    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>({
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
            1.0f, 2.0f, 3.0f, 4.0f,
        },
        qScale, qOffset));

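    // As above, the divisor is always the full 3x3 window, e.g. the top-left output is
    // sqrt((1 + 4 + 1 + 4) / 9) ~= 1.0540f.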
    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>({
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.2909f, 2.1602f, 3.1091f, 2.8867f,
            1.0540f, 1.7638f, 2.5385f, 2.3570f,
        },
        qScale, qOffset));

    return SimplePooling2dTestImpl<ArmnnType>(
        workloadFactory, memoryManager, tensorHandleFactory, descriptor, qScale, qOffset, input, outputExpected);
}

} // anonymous namespace

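// Explicit entry points used by the backend unit tests. Each one simply instantiates the matching
// *TestCommon template above for a concrete data type (Float32, QAsymmU8 or QSymmS16), forwarding
// any quantization scale/offset overrides.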
LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
}

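// The QAsymmU8 variant below overrides the default quantization parameters (scale 3.0f, offset -5);
// the Int16 variant keeps the defaults of the common implementation.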
LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 3.0f, -5);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
}

LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding, 0.1f, 128);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
}

LayerTestResult<float, 4> SimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleMaxPooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleMaxPooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 1.0f, -5);
}

LayerTestResult<int16_t, 4> IgnorePaddingMaxPooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingMaxPooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> SimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout, 0.5, -1);
}

LayerTestResult<int16_t, 4> SimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    bool forceNoPadding)
{
    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, forceNoPadding);
}

LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, 0.5, -1);
}

LayerTestResult<int16_t, 4> LargeTensorsAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return LargeTensorsAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleAveragePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> IgnorePaddingAveragePooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingAveragePooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> SimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<int16_t, 4> SimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::DataLayout dataLayout)
{
    return SimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride1Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride1TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride3TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize3Stride4Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize3Stride4TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize7Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize7Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize7TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> L2Pooling2dSize9Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> L2Pooling2dSize9Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return L2Pooling2dSize9TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> IgnorePaddingSimpleL2Pooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingSimpleL2Pooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> IgnorePaddingL2Pooling2dSize3Int16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return IgnorePaddingL2Pooling2dSize3TestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

LayerTestResult<int16_t, 4> AsymmetricNonSquarePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    return AsymmetricNonSquarePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, tensorHandleFactory);
}

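// The Compare* tests forward the same pooling configuration to both workloadFactory and
// refWorkloadFactory (normally the reference backend) so the two sets of results can be checked
// against each other; the QAsymmU8 variant supplies an explicit quantization scale and offset.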
LayerTestResult<float, 4> ComparePooling2dTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::Float32>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
}

LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QAsymmU8>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory,
        poolingType, 0.1f, 128);
}

LayerTestResult<int16_t, 4> ComparePooling2dInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::ITensorHandleFactory& refTensorHandleFactory,
    armnn::PoolingAlgorithm poolingType)
{
    return ComparePooling2dTestCommon<armnn::DataType::QSymmS16>(
        workloadFactory, memoryManager, refWorkloadFactory, tensorHandleFactory, refTensorHandleFactory, poolingType);
}