blob: cf83f8ce2bb9cf14a117712d8f8fd3117a59b842 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
#include "Pooling2d.hpp"
#include "DataLayoutIndexed.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
17
18namespace
19{
20 using PoolingAlgorithm = armnn::PoolingAlgorithm;
21
22 float DefaultInitializer(PoolingAlgorithm algorithm)
23 {
24 switch (algorithm)
25 {
26 case PoolingAlgorithm::Max:
27 {
28 return std::numeric_limits<float>::lowest();
29 }
30 case PoolingAlgorithm::Average:
31 case PoolingAlgorithm::L2:
32 {
33 return 0.0f;
34 }
35 default:
36 {
37 throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
38 }
39 }
40 }
41
42 using Accumulator = std::function<void(float & accu, float value)>;
43
44 Accumulator GetAccumulator(PoolingAlgorithm algorithm)
45 {
46 switch (algorithm)
47 {
48 case PoolingAlgorithm::Max:
49 {
50 return [](float & accu, float value) {
51 if (value > accu) {
52 accu = value;
53 }
54 };
55 }
56
57 case PoolingAlgorithm::Average:
58 {
59 return [](float & accu, float value) {
60 accu += value;
61 };
62 }
63
64 case PoolingAlgorithm::L2:
65 {
66 return [](float & accu, float value) {
67 accu += (value*value);
68 };
69 }
70
71 default:
72 {
73 throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
74 }
75 }
76 }
77
78 using Executor = std::function<void(float & accumulated, float kernelSize)>;
79
80 Executor GetExecutor(PoolingAlgorithm algorithm)
81 {
82 switch (algorithm)
83 {
84 case PoolingAlgorithm::Max:
85 {
86 return [](float & accumulated, float kernelSize) {};
87 }
88
89 case PoolingAlgorithm::Average:
90 {
91 return [](float & accumulated, float kernelSize) {
92 accumulated /= kernelSize;
93 };
94 }
95
96 case PoolingAlgorithm::L2:
97 {
98 return [](float & accumulated, float kernelSize) {
99 accumulated = sqrtf(accumulated / kernelSize);
100 };
101 }
102
103 default:
104 {
105 throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
106 }
107 }
108 }
109
Finn Williams70f609b2019-11-06 16:54:53 +0000110 bool OnPaddingOnly(int start, int end, int maxRange)
telsoa014fcda012018-03-09 14:13:49 +0000111 {
Finn Williams70f609b2019-11-06 16:54:53 +0000112 if (end <= 0 || start > maxRange)
telsoa014fcda012018-03-09 14:13:49 +0000113 {
114 return true;
115 }
116 else
117 {
118 return false;
119 }
120 }
121
122
123 bool ClampRange(int & start, int & end, int maxRange)
124 {
125 if (start < 0 || end > maxRange)
126 {
127 start = std::min(std::max(start, 0), maxRange);
128 end = std::min(std::max(end, 0), maxRange);
129 return true;
130 }
131 else
132 {
133 return false;
134 }
135 }
136}
137
Matteo Martincigh21350152018-11-28 16:22:22 +0000138using namespace armnnUtils;
139
telsoa014fcda012018-03-09 14:13:49 +0000140namespace armnn
141{
Teresa Charlina3b20472019-06-06 11:12:32 +0100142void Pooling2d(Decoder<float>& rInputDecoder,
143 Encoder<float>& rOutputEncoder,
telsoa014fcda012018-03-09 14:13:49 +0000144 const TensorInfo& inputInfo,
145 const TensorInfo& outputInfo,
146 const Pooling2dDescriptor& params)
147{
Teresa Charlina3b20472019-06-06 11:12:32 +0100148 const DataLayoutIndexed dataLayout(params.m_DataLayout);
James Conroy45a9b772018-10-31 11:47:53 +0000149 auto channelsIndex = dataLayout.GetChannelsIndex();
150 auto heightIndex = dataLayout.GetHeightIndex();
151 auto widthIndex = dataLayout.GetWidthIndex();
James Conroy69482272018-10-19 10:41:35 +0100152
telsoa014fcda012018-03-09 14:13:49 +0000153 const int batchSize = boost::numeric_cast<int>(outputInfo.GetShape()[0]);
James Conroy69482272018-10-19 10:41:35 +0100154 const int channels = boost::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
155 const int heightOutput = boost::numeric_cast<int>(outputInfo.GetShape()[heightIndex]);
156 const int widthOutput = boost::numeric_cast<int>(outputInfo.GetShape()[widthIndex]);
157 const int heightInput = boost::numeric_cast<int>(inputInfo.GetShape()[heightIndex]);
158 const int widthInput = boost::numeric_cast<int>(inputInfo.GetShape()[widthIndex]);
telsoa014fcda012018-03-09 14:13:49 +0000159 const int padLeft = boost::numeric_cast<int>(params.m_PadLeft);
160 const int padRight = boost::numeric_cast<int>(params.m_PadRight);
161 const int padTop = boost::numeric_cast<int>(params.m_PadTop);
162 const int padBottom = boost::numeric_cast<int>(params.m_PadBottom);
163 const int strideX = boost::numeric_cast<int>(params.m_StrideX);
164 const int strideY = boost::numeric_cast<int>(params.m_StrideY);
165 const int poolHeight = boost::numeric_cast<int>(params.m_PoolHeight);
166 const int poolWidth = boost::numeric_cast<int>(params.m_PoolWidth);
167
168 float defaultInitializer = DefaultInitializer(params.m_PoolType);
169
170 Accumulator accumulate = GetAccumulator(params.m_PoolType);
171 Executor execute = GetExecutor(params.m_PoolType);
172
Teresa Charlina3b20472019-06-06 11:12:32 +0100173 TensorShape outputShape = outputInfo.GetShape();
174 TensorShape inputShape = inputInfo.GetShape();
James Conroy45a9b772018-10-31 11:47:53 +0000175
telsoa014fcda012018-03-09 14:13:49 +0000176 // Check supported padding methods outside the loop to simplify
telsoa01c577f2c2018-08-31 09:22:23 +0100177 // the inner loop.
telsoa014fcda012018-03-09 14:13:49 +0000178 if (params.m_PaddingMethod != PaddingMethod::Exclude &&
179 params.m_PaddingMethod != PaddingMethod::IgnoreValue)
180 {
181 throw armnn::InvalidArgumentException("Unsupported padding type");
182 }
183
184 for (int n = 0; n < batchSize; n++)
185 {
186 for (int c = 0; c < channels; c++)
187 {
188 for (int yOutput = 0; yOutput < heightOutput; yOutput++)
189 {
Finn Williams70f609b2019-11-06 16:54:53 +0000190 // Calculate values independent of the x axis
191 int hstart = (yOutput * strideY) - padTop;
192 int hend = hstart + poolHeight;
193 // Clamp the pooling region inside the valid input area (which includes the padding).
194 // This is necessary because the final pooling in a row may overlap beyond the padding.
195 hend = std::min(hend, heightInput + padBottom);
196
197 int height = hend - hstart;
198 bool hclamped = ClampRange(hstart, hend, heightInput);
199
telsoa014fcda012018-03-09 14:13:49 +0000200 for (int xOutput = 0; xOutput < widthOutput; xOutput++)
201 {
telsoa014fcda012018-03-09 14:13:49 +0000202 int wstart = (xOutput * strideX) - padLeft;
telsoa014fcda012018-03-09 14:13:49 +0000203 int wend = wstart + poolWidth;
204
205 // Clamp the pooling region inside the valid input area (which includes the padding).
206 // This is necessary because the final pooling in a row may overlap beyond the padding.
surmeh01bceff2f2018-03-29 16:29:27 +0100207 wend = std::min(wend, widthInput + padRight);
telsoa014fcda012018-03-09 14:13:49 +0000208
209 float result = defaultInitializer;
Finn Williams70f609b2019-11-06 16:54:53 +0000210 float poolAreaSize = boost::numeric_cast<float>(height * (wend - wstart));
telsoa014fcda012018-03-09 14:13:49 +0000211
telsoa01c577f2c2018-08-31 09:22:23 +0100212 // Special case: when the pooling kernel is over a padding region and the padding
telsoa014fcda012018-03-09 14:13:49 +0000213 // size is larger or equal to the kernel and the kernel only covers
214 // padding and no real values, then we initialize the result as zero
215 // by convention. This is because we need to choose a value here and
216 // all values we have are padding, which we ignore.
Finn Williams70f609b2019-11-06 16:54:53 +0000217 if (OnPaddingOnly(hstart, hend, heightInput) ||
218 OnPaddingOnly(wstart, wend, widthInput))
telsoa014fcda012018-03-09 14:13:49 +0000219 {
220 result = 0.0f;
Finn Williams70f609b2019-11-06 16:54:53 +0000221
222 unsigned int outputIndex = dataLayout.GetIndex(outputShape,
223 boost::numeric_cast<unsigned int>(n),
224 boost::numeric_cast<unsigned int>(c),
225 boost::numeric_cast<unsigned int>(yOutput),
226 boost::numeric_cast<unsigned int>(xOutput));
227 rOutputEncoder[outputIndex];
228 rOutputEncoder.Set(result);
229 continue;
telsoa014fcda012018-03-09 14:13:49 +0000230 }
231
Finn Williams70f609b2019-11-06 16:54:53 +0000232 bool clamped = hclamped |= ClampRange(wstart, wend, widthInput);
telsoa014fcda012018-03-09 14:13:49 +0000233
234 if (clamped && params.m_PaddingMethod == PaddingMethod::Exclude)
235 {
telsoa01c577f2c2018-08-31 09:22:23 +0100236 // When we exclude the padding, it means we calculate with a smaller
237 // kernel size, so I changed the divisor here.
telsoa014fcda012018-03-09 14:13:49 +0000238 poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
239 }
240
241 for (auto yInput = hstart; yInput < hend; yInput++)
242 {
243 for (auto xInput = wstart; xInput < wend; xInput++)
244 {
Teresa Charlina3b20472019-06-06 11:12:32 +0100245 unsigned int inputIndex = dataLayout.GetIndex(inputShape,
246 boost::numeric_cast<unsigned int>(n),
247 boost::numeric_cast<unsigned int>(c),
248 boost::numeric_cast<unsigned int>(yInput),
249 boost::numeric_cast<unsigned int>(xInput));
250
251 rInputDecoder[inputIndex];
252 float inval = rInputDecoder.Get();
telsoa014fcda012018-03-09 14:13:49 +0000253
254 accumulate(result, inval);
255 }
256 }
257
258 execute(result, poolAreaSize);
259
Teresa Charlina3b20472019-06-06 11:12:32 +0100260 unsigned int outputIndex = dataLayout.GetIndex(outputShape,
261 boost::numeric_cast<unsigned int>(n),
262 boost::numeric_cast<unsigned int>(c),
263 boost::numeric_cast<unsigned int>(yOutput),
264 boost::numeric_cast<unsigned int>(xOutput));
265
266 rOutputEncoder[outputIndex];
267 rOutputEncoder.Set(result);
telsoa014fcda012018-03-09 14:13:49 +0000268 }
269 }
270 }
271 }
272}
273
274} //namespace armnn