//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
#include "Pooling2d.hpp"

#include <armnn/Exceptions.hpp>
#include <armnn/Types.hpp>

#include <boost/numeric/conversion/cast.hpp>

#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
16
17namespace
18{
19 using PoolingAlgorithm = armnn::PoolingAlgorithm;
20
21 float DefaultInitializer(PoolingAlgorithm algorithm)
22 {
23 switch (algorithm)
24 {
25 case PoolingAlgorithm::Max:
26 {
27 return std::numeric_limits<float>::lowest();
28 }
29 case PoolingAlgorithm::Average:
30 case PoolingAlgorithm::L2:
31 {
32 return 0.0f;
33 }
34 default:
35 {
36 throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
37 }
38 }
39 }
40
41 using Accumulator = std::function<void(float & accu, float value)>;
42
43 Accumulator GetAccumulator(PoolingAlgorithm algorithm)
44 {
45 switch (algorithm)
46 {
47 case PoolingAlgorithm::Max:
48 {
49 return [](float & accu, float value) {
50 if (value > accu) {
51 accu = value;
52 }
53 };
54 }
55
56 case PoolingAlgorithm::Average:
57 {
58 return [](float & accu, float value) {
59 accu += value;
60 };
61 }
62
63 case PoolingAlgorithm::L2:
64 {
65 return [](float & accu, float value) {
66 accu += (value*value);
67 };
68 }
69
70 default:
71 {
72 throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
73 }
74 }
75 }
76
77 using Executor = std::function<void(float & accumulated, float kernelSize)>;
78
79 Executor GetExecutor(PoolingAlgorithm algorithm)
80 {
81 switch (algorithm)
82 {
83 case PoolingAlgorithm::Max:
84 {
85 return [](float & accumulated, float kernelSize) {};
86 }
87
88 case PoolingAlgorithm::Average:
89 {
90 return [](float & accumulated, float kernelSize) {
91 accumulated /= kernelSize;
92 };
93 }
94
95 case PoolingAlgorithm::L2:
96 {
97 return [](float & accumulated, float kernelSize) {
98 accumulated = sqrtf(accumulated / kernelSize);
99 };
100 }
101
102 default:
103 {
104 throw armnn::InvalidArgumentException("Unsupported pooling algorithm");
105 }
106 }
107 }
108
109 bool OnPaddingOnly(int start, int end, int maxRange, int padding)
110 {
111 if (end <= 0 || start > (maxRange - padding))
112 {
113 return true;
114 }
115 else
116 {
117 return false;
118 }
119 }
120
121
122 bool ClampRange(int & start, int & end, int maxRange)
123 {
124 if (start < 0 || end > maxRange)
125 {
126 start = std::min(std::max(start, 0), maxRange);
127 end = std::min(std::max(end, 0), maxRange);
128 return true;
129 }
130 else
131 {
132 return false;
133 }
134 }
135}
136
137namespace armnn
138{
139
140void Pooling2d(const float* in,
141 float* out,
142 const TensorInfo& inputInfo,
143 const TensorInfo& outputInfo,
144 const Pooling2dDescriptor& params)
145{
James Conroy69482272018-10-19 10:41:35 +0100146 const unsigned int channelsIndex = params.m_DataLayout.GetChannelsIndex();
147 const unsigned int heightIndex = params.m_DataLayout.GetHeightIndex();
148 const unsigned int widthIndex = params.m_DataLayout.GetWidthIndex();
149
telsoa014fcda012018-03-09 14:13:49 +0000150 const int batchSize = boost::numeric_cast<int>(outputInfo.GetShape()[0]);
James Conroy69482272018-10-19 10:41:35 +0100151 const int channels = boost::numeric_cast<int>(outputInfo.GetShape()[channelsIndex]);
152 const int heightOutput = boost::numeric_cast<int>(outputInfo.GetShape()[heightIndex]);
153 const int widthOutput = boost::numeric_cast<int>(outputInfo.GetShape()[widthIndex]);
154 const int heightInput = boost::numeric_cast<int>(inputInfo.GetShape()[heightIndex]);
155 const int widthInput = boost::numeric_cast<int>(inputInfo.GetShape()[widthIndex]);
telsoa014fcda012018-03-09 14:13:49 +0000156 const int padLeft = boost::numeric_cast<int>(params.m_PadLeft);
157 const int padRight = boost::numeric_cast<int>(params.m_PadRight);
158 const int padTop = boost::numeric_cast<int>(params.m_PadTop);
159 const int padBottom = boost::numeric_cast<int>(params.m_PadBottom);
160 const int strideX = boost::numeric_cast<int>(params.m_StrideX);
161 const int strideY = boost::numeric_cast<int>(params.m_StrideY);
162 const int poolHeight = boost::numeric_cast<int>(params.m_PoolHeight);
163 const int poolWidth = boost::numeric_cast<int>(params.m_PoolWidth);
164
165 float defaultInitializer = DefaultInitializer(params.m_PoolType);
166
167 Accumulator accumulate = GetAccumulator(params.m_PoolType);
168 Executor execute = GetExecutor(params.m_PoolType);
169
170 // Check supported padding methods outside the loop to simplify
telsoa01c577f2c2018-08-31 09:22:23 +0100171 // the inner loop.
telsoa014fcda012018-03-09 14:13:49 +0000172 if (params.m_PaddingMethod != PaddingMethod::Exclude &&
173 params.m_PaddingMethod != PaddingMethod::IgnoreValue)
174 {
175 throw armnn::InvalidArgumentException("Unsupported padding type");
176 }
177
178 for (int n = 0; n < batchSize; n++)
179 {
180 for (int c = 0; c < channels; c++)
181 {
182 for (int yOutput = 0; yOutput < heightOutput; yOutput++)
183 {
184 for (int xOutput = 0; xOutput < widthOutput; xOutput++)
185 {
186 int hstart = (yOutput * strideY) - padTop;
187 int wstart = (xOutput * strideX) - padLeft;
188 int hend = hstart + poolHeight;
189 int wend = wstart + poolWidth;
190
191 // Clamp the pooling region inside the valid input area (which includes the padding).
192 // This is necessary because the final pooling in a row may overlap beyond the padding.
surmeh01bceff2f2018-03-29 16:29:27 +0100193 hend = std::min(hend, heightInput + padBottom);
194 wend = std::min(wend, widthInput + padRight);
telsoa014fcda012018-03-09 14:13:49 +0000195
196 float result = defaultInitializer;
197 float poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
198
telsoa01c577f2c2018-08-31 09:22:23 +0100199 // Special case: when the pooling kernel is over a padding region and the padding
telsoa014fcda012018-03-09 14:13:49 +0000200 // size is larger or equal to the kernel and the kernel only covers
201 // padding and no real values, then we initialize the result as zero
202 // by convention. This is because we need to choose a value here and
203 // all values we have are padding, which we ignore.
204 if (OnPaddingOnly(hstart, hend, heightInput, padBottom) ||
205 OnPaddingOnly(wstart, wend, widthInput, padRight))
206 {
207 result = 0.0f;
208 }
209
210 bool clamped = ClampRange(wstart, wend, widthInput);
211 clamped |= ClampRange(hstart, hend, heightInput);
212
213 if (clamped && params.m_PaddingMethod == PaddingMethod::Exclude)
214 {
telsoa01c577f2c2018-08-31 09:22:23 +0100215 // When we exclude the padding, it means we calculate with a smaller
216 // kernel size, so I changed the divisor here.
telsoa014fcda012018-03-09 14:13:49 +0000217 poolAreaSize = boost::numeric_cast<float>((hend - hstart) * (wend - wstart));
218 }
219
220 for (auto yInput = hstart; yInput < hend; yInput++)
221 {
222 for (auto xInput = wstart; xInput < wend; xInput++)
223 {
224 float inval = in[n * widthInput * heightInput * channels +
225 c * widthInput * heightInput +
226 yInput * widthInput +
227 xInput];
228
229 accumulate(result, inval);
230 }
231 }
232
233 execute(result, poolAreaSize);
234
235 out[n * widthOutput * heightOutput * channels +
236 c * widthOutput * heightOutput +
237 yOutput * widthOutput +
238 xOutput] = result;
239 }
240 }
241 }
242 }
243}
244
245} //namespace armnn