//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "Pooling2d.hpp"
#include "TensorBufferArrayView.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <cmath>
#include <limits>
#include <algorithm>
#include <functional>

namespace
{
    using PoolingAlgorithm = armnn::PoolingAlgorithm;

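    // Returns the value used to initialise the accumulator for the given pooling
    // algorithm: the lowest float for Max, zero for Average and L2.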
    float DefaultInitializer(PoolingAlgorithm algorithm)
    {
        switch (algorithm)
        {
            case PoolingAlgorithm::Max:
            {
                return std::numeric_limits<float>::lowest();
            }
            case PoolingAlgorithm::Average:
            case PoolingAlgorithm::L2:
            {
                return 0.0f;
            }
            default:
            {
                throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
            }
        }
    }

    using Accumulator = std::function<void(float & accu, float value)>;

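    // Returns the function used to fold each input value within the pooling
    // window into the running accumulator.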
    Accumulator GetAccumulator(PoolingAlgorithm algorithm)
    {
        switch (algorithm)
        {
            case PoolingAlgorithm::Max:
            {
                return [](float & accu, float value) {
                    if (value > accu) {
                        accu = value;
                    }
                };
            }

            case PoolingAlgorithm::Average:
            {
                return [](float & accu, float value) {
                    accu += value;
                };
            }

            case PoolingAlgorithm::L2:
            {
                return [](float & accu, float value) {
                    accu += (value * value);
                };
            }

            default:
            {
                throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
            }
        }
    }

    using Executor = std::function<void(float & accumulated, float kernelSize)>;

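    // Returns the finalisation step applied once the whole pooling window has been
    // accumulated: a no-op for Max, mean for Average, and root-mean-square for L2.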
    Executor GetExecutor(PoolingAlgorithm algorithm)
    {
        switch (algorithm)
        {
            case PoolingAlgorithm::Max:
            {
                return [](float & accumulated, float kernelSize) {};
            }

            case PoolingAlgorithm::Average:
            {
                return [](float & accumulated, float kernelSize) {
                    accumulated /= kernelSize;
                };
            }

            case PoolingAlgorithm::L2:
            {
                return [](float & accumulated, float kernelSize) {
                    accumulated = sqrtf(accumulated / kernelSize);
                };
            }

            default:
            {
                throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
            }
        }
    }

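    // Returns true when the pooling window [start, end) covers only padding and
    // no real input values.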
    bool OnPaddingOnly(int start, int end, int maxRange, int padding)
    {
        if (end <= 0 || start > (maxRange - padding))
        {
            return true;
        }
        else
        {
            return false;
        }
    }

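    // Clamps [start, end) to the valid input extent [0, maxRange] and returns true
    // if the range had to be adjusted.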
    bool ClampRange(int & start, int & end, int maxRange)
    {
        if (start < 0 || end > maxRange)
        {
            start = std::min(std::max(start, 0), maxRange);
            end   = std::min(std::max(end, 0), maxRange);
            return true;
        }
        else
        {
            return false;
        }
    }
}

using namespace armnnUtils;

namespace armnn
{

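// Applies a 2D pooling operation (Max, Average or L2) to the input tensor and
// writes the results to the output tensor, using the pool size, strides, padding
// and data layout described by the Pooling2dDescriptor.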
void Pooling2d(const float* in,
               float* out,
               const TensorInfo& inputInfo,
               const TensorInfo& outputInfo,
               const Pooling2dDescriptor& params)
{
    const DataLayoutIndexed dataLayout = params.m_DataLayout;
    auto channelsIndex = dataLayout.GetChannelsIndex();
    auto heightIndex   = dataLayout.GetHeightIndex();
    auto widthIndex    = dataLayout.GetWidthIndex();

    const int batchSize    = boost::numeric_cast<int>(outputInfo.GetShape()[0]);
    const int channels     = boost::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
    const int heightOutput = boost::numeric_cast<int>(outputInfo.GetShape()[heightIndex]);
    const int widthOutput  = boost::numeric_cast<int>(outputInfo.GetShape()[widthIndex]);
    const int heightInput  = boost::numeric_cast<int>(inputInfo.GetShape()[heightIndex]);
    const int widthInput   = boost::numeric_cast<int>(inputInfo.GetShape()[widthIndex]);
    const int padLeft      = boost::numeric_cast<int>(params.m_PadLeft);
    const int padRight     = boost::numeric_cast<int>(params.m_PadRight);
    const int padTop       = boost::numeric_cast<int>(params.m_PadTop);
    const int padBottom    = boost::numeric_cast<int>(params.m_PadBottom);
    const int strideX      = boost::numeric_cast<int>(params.m_StrideX);
    const int strideY      = boost::numeric_cast<int>(params.m_StrideY);
    const int poolHeight   = boost::numeric_cast<int>(params.m_PoolHeight);
    const int poolWidth    = boost::numeric_cast<int>(params.m_PoolWidth);

    float defaultInitializer = DefaultInitializer(params.m_PoolType);

    Accumulator accumulate = GetAccumulator(params.m_PoolType);
    Executor    execute    = GetExecutor(params.m_PoolType);

    TensorBufferArrayView<const float> input(inputInfo.GetShape(), in, dataLayout);
    TensorBufferArrayView<float> output(outputInfo.GetShape(), out, dataLayout);

    // Check the supported padding methods outside the loop to simplify the inner loop.
    if (params.m_PaddingMethod != PaddingMethod::Exclude &&
        params.m_PaddingMethod != PaddingMethod::IgnoreValue)
    {
        throw armnn::InvalidArgumentException("Unsupported padding type");
    }

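    // Visit every output element and reduce the corresponding window of the input.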
    for (int n = 0; n < batchSize; n++)
    {
        for (int c = 0; c < channels; c++)
        {
            for (int yOutput = 0; yOutput < heightOutput; yOutput++)
            {
                for (int xOutput = 0; xOutput < widthOutput; xOutput++)
                {
                    int hstart = (yOutput * strideY) - padTop;
                    int wstart = (xOutput * strideX) - padLeft;
                    int hend = hstart + poolHeight;
                    int wend = wstart + poolWidth;

                    // Clamp the pooling region inside the valid input area (which includes the padding).
                    // This is necessary because the final pooling in a row may overlap beyond the padding.
                    hend = std::min(hend, heightInput + padBottom);
                    wend = std::min(wend, widthInput + padRight);

                    float result = defaultInitializer;
                    float poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));

                    // Special case: if the pooling window lies entirely over the padding
                    // (the padding is at least as large as the kernel, so the window covers
                    // no real input values), initialise the result to zero by convention:
                    // a value has to be chosen, and every value available is padding,
                    // which is ignored.
                    if (OnPaddingOnly(hstart, hend, heightInput, padBottom) ||
                        OnPaddingOnly(wstart, wend, widthInput, padRight))
                    {
                        result = 0.0f;
                    }

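                    // Clamp the window to the real input so that padded elements are never
                    // read. With IgnoreValue the divisor keeps the padded area computed above;
                    // with Exclude it is recomputed from the clamped window below.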
                    bool clamped = ClampRange(wstart, wend, widthInput);
                    clamped |= ClampRange(hstart, hend, heightInput);

                    if (clamped && params.m_PaddingMethod == PaddingMethod::Exclude)
                    {
                        // When the padding is excluded, the effective kernel is smaller,
                        // so the divisor is recomputed from the clamped window.
                        poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
                    }

                    for (auto yInput = hstart; yInput < hend; yInput++)
                    {
                        for (auto xInput = wstart; xInput < wend; xInput++)
                        {
                            float inval = input.Get(boost::numeric_cast<unsigned int>(n),
                                                    boost::numeric_cast<unsigned int>(c),
                                                    boost::numeric_cast<unsigned int>(yInput),
                                                    boost::numeric_cast<unsigned int>(xInput));

                            accumulate(result, inval);
                        }
                    }

                    execute(result, poolAreaSize);

                    output.Get(boost::numeric_cast<unsigned int>(n),
                               boost::numeric_cast<unsigned int>(c),
                               boost::numeric_cast<unsigned int>(yOutput),
                               boost::numeric_cast<unsigned int>(xOutput)) = result;
                }
            }
        }
    }
}

} // namespace armnn