//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2d.hpp"
#include "DataLayoutIndexed.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <cmath>
#include <limits>
#include <algorithm>
#include <functional>

namespace
{
    using PoolingAlgorithm = armnn::PoolingAlgorithm;

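    // Returns the value used to initialise the accumulator for a given pooling
    // algorithm: the lowest representable float for Max (so any real input replaces it),
    // and zero for Average and L2, which accumulate sums.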
    float DefaultInitializer(PoolingAlgorithm algorithm)
    {
        switch (algorithm)
        {
            case PoolingAlgorithm::Max:
            {
                return std::numeric_limits<float>::lowest();
            }
            case PoolingAlgorithm::Average:
            case PoolingAlgorithm::L2:
            {
                return 0.0f;
            }
            default:
            {
                throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
            }
        }
    }

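    // An Accumulator folds a single input value into the running result for a pooling
    // window: running maximum for Max, running sum for Average, and running sum of
    // squares for L2.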
    using Accumulator = std::function<void(float & accu, float value)>;

    Accumulator GetAccumulator(PoolingAlgorithm algorithm)
    {
        switch (algorithm)
        {
            case PoolingAlgorithm::Max:
            {
                return [](float & accu, float value) {
                    if (value > accu) {
                        accu = value;
                    }
                };
            }

            case PoolingAlgorithm::Average:
            {
                return [](float & accu, float value) {
                    accu += value;
                };
            }

            case PoolingAlgorithm::L2:
            {
                return [](float & accu, float value) {
                    accu += (value * value);
                };
            }

            default:
            {
                throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
            }
        }
    }

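    // An Executor finalises the accumulated value once the whole pooling window has been
    // visited: a no-op for Max, division by the window size for Average, and the square
    // root of the mean of squares for L2.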
    using Executor = std::function<void(float & accumulated, float kernelSize)>;

    Executor GetExecutor(PoolingAlgorithm algorithm)
    {
        switch (algorithm)
        {
            case PoolingAlgorithm::Max:
            {
                return [](float & accumulated, float kernelSize) {};
            }

            case PoolingAlgorithm::Average:
            {
                return [](float & accumulated, float kernelSize) {
                    accumulated /= kernelSize;
                };
            }

            case PoolingAlgorithm::L2:
            {
                return [](float & accumulated, float kernelSize) {
                    accumulated = sqrtf(accumulated / kernelSize);
                };
            }

            default:
            {
                throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
            }
        }
    }

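    // Returns true if the [start, end) window lies entirely over padding, i.e. it covers
    // no real input values at all.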
    bool OnPaddingOnly(int start, int end, int maxRange, int padding)
    {
        if (end <= 0 || start > (maxRange - padding))
        {
            return true;
        }
        else
        {
            return false;
        }
    }

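    // Clamps [start, end) to [0, maxRange) and returns true if any clamping was needed,
    // i.e. the window originally extended into the padding.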
    bool ClampRange(int & start, int & end, int maxRange)
    {
        if (start < 0 || end > maxRange)
        {
            start = std::min(std::max(start, 0), maxRange);
            end   = std::min(std::max(end, 0), maxRange);
            return true;
        }
        else
        {
            return false;
        }
    }
}

using namespace armnnUtils;

namespace armnn
{
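// Computes a 2D pooling operation (Max, Average or L2) over the spatial dimensions of the
// input tensor, reading input values through rInputDecoder and writing results through
// rOutputEncoder. Both the IgnoreValue and Exclude padding methods are supported.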
void Pooling2d(Decoder<float>& rInputDecoder,
               Encoder<float>& rOutputEncoder,
               const TensorInfo& inputInfo,
               const TensorInfo& outputInfo,
               const Pooling2dDescriptor& params)
{
    const DataLayoutIndexed dataLayout(params.m_DataLayout);
    auto channelsIndex = dataLayout.GetChannelsIndex();
    auto heightIndex = dataLayout.GetHeightIndex();
    auto widthIndex = dataLayout.GetWidthIndex();

    const int batchSize = boost::numeric_cast<int>(outputInfo.GetShape()[0]);
    const int channels = boost::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
    const int heightOutput = boost::numeric_cast<int>(outputInfo.GetShape()[heightIndex]);
    const int widthOutput = boost::numeric_cast<int>(outputInfo.GetShape()[widthIndex]);
    const int heightInput = boost::numeric_cast<int>(inputInfo.GetShape()[heightIndex]);
    const int widthInput = boost::numeric_cast<int>(inputInfo.GetShape()[widthIndex]);
    const int padLeft = boost::numeric_cast<int>(params.m_PadLeft);
    const int padRight = boost::numeric_cast<int>(params.m_PadRight);
    const int padTop = boost::numeric_cast<int>(params.m_PadTop);
    const int padBottom = boost::numeric_cast<int>(params.m_PadBottom);
    const int strideX = boost::numeric_cast<int>(params.m_StrideX);
    const int strideY = boost::numeric_cast<int>(params.m_StrideY);
    const int poolHeight = boost::numeric_cast<int>(params.m_PoolHeight);
    const int poolWidth = boost::numeric_cast<int>(params.m_PoolWidth);

    float defaultInitializer = DefaultInitializer(params.m_PoolType);

    Accumulator accumulate = GetAccumulator(params.m_PoolType);
    Executor execute = GetExecutor(params.m_PoolType);

    TensorShape outputShape = outputInfo.GetShape();
    TensorShape inputShape = inputInfo.GetShape();

    // Check supported padding methods outside the loop to simplify
    // the inner loop.
    if (params.m_PaddingMethod != PaddingMethod::Exclude &&
        params.m_PaddingMethod != PaddingMethod::IgnoreValue)
    {
        throw armnn::InvalidArgumentException("Unsupported padding type");
    }

    for (int n = 0; n < batchSize; n++)
    {
        for (int c = 0; c < channels; c++)
        {
            for (int yOutput = 0; yOutput < heightOutput; yOutput++)
            {
                for (int xOutput = 0; xOutput < widthOutput; xOutput++)
                {
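                    // Compute the pooling window for this output element in input
                    // coordinates; at this point it may still extend into the padding.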
                    int hstart = (yOutput * strideY) - padTop;
                    int wstart = (xOutput * strideX) - padLeft;
                    int hend = hstart + poolHeight;
                    int wend = wstart + poolWidth;

                    // Clamp the pooling region inside the valid input area (which includes the padding).
                    // This is necessary because the final pooling in a row may overlap beyond the padding.
                    hend = std::min(hend, heightInput + padBottom);
                    wend = std::min(wend, widthInput + padRight);

                    float result = defaultInitializer;
                    float poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));

                    // Special case: when the pooling kernel is over a padding region and the padding
                    //               size is larger than or equal to the kernel, the kernel only covers
                    //               padding and no real values, so we initialize the result as zero
                    //               by convention. This is because we need to choose a value here and
                    //               all values we have are padding, which we ignore.
                    if (OnPaddingOnly(hstart, hend, heightInput, padBottom) ||
                        OnPaddingOnly(wstart, wend, widthInput, padRight))
                    {
                        result = 0.0f;
                    }

                    bool clamped = ClampRange(wstart, wend, widthInput);
                    clamped |= ClampRange(hstart, hend, heightInput);

                    if (clamped && params.m_PaddingMethod == PaddingMethod::Exclude)
                    {
                        // When the padding is excluded, the calculation uses a smaller
                        // effective kernel, so the divisor is recalculated here.
                        poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
                    }

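                    // Accumulate over the clamped pooling window.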
                    for (auto yInput = hstart; yInput < hend; yInput++)
                    {
                        for (auto xInput = wstart; xInput < wend; xInput++)
                        {
                            unsigned int inputIndex = dataLayout.GetIndex(inputShape,
                                                                          boost::numeric_cast<unsigned int>(n),
                                                                          boost::numeric_cast<unsigned int>(c),
                                                                          boost::numeric_cast<unsigned int>(yInput),
                                                                          boost::numeric_cast<unsigned int>(xInput));

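                            // Position the decoder at the element and read its value.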
                            rInputDecoder[inputIndex];
                            float inval = rInputDecoder.Get();

                            accumulate(result, inval);
                        }
                    }

                    execute(result, poolAreaSize);

                    unsigned int outputIndex = dataLayout.GetIndex(outputShape,
                                                                   boost::numeric_cast<unsigned int>(n),
                                                                   boost::numeric_cast<unsigned int>(c),
                                                                   boost::numeric_cast<unsigned int>(yOutput),
                                                                   boost::numeric_cast<unsigned int>(xOutput));

                    // Position the encoder at the output element and write the result.
                    rOutputEncoder[outputIndex];
                    rOutputEncoder.Set(result);
                }
            }
        }
    }
}

} //namespace armnn