/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "PoolingLayer.h"

#include "tests/validation_new/FixedPoint.h"
#include "tests/validation_new/half.h"

#include <algorithm> // std::max, std::min
#include <limits>    // std::numeric_limits
#include <type_traits>
#include <utility> // std::pair

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
namespace
{
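// Computes the pooled output shape from the input shape and pooling configuration by delegating
// to arm_compute::scaled_dimensions(). A minimal worked example, assuming floor rounding and the
// usual formula out = (in + 2 * pad - pool_size) / stride + 1:
//   input 7x7, pool_size 3, stride 2, pad 0  ->  (7 + 0 - 3) / 2 + 1 = 3, i.e. a 3x3 output plane.
// Dimensions beyond x and y are left untouched.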
TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
{
    TensorShape dst_shape = shape;
    const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(shape.x(),
                                                                                              shape.y(),
                                                                                              info.pool_size(),
                                                                                              info.pool_size(),
                                                                                              info.pad_stride_info());
    dst_shape.set(0, scaled_dims.first);
    dst_shape.set(1, scaled_dims.second);

    return dst_shape;
}
} // namespace

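// Floating-point reference: enabled for F16/F32 through the is_floating_point<T> trait used by the
// validation framework. MAX pooling takes the maximum over the window clamped to the valid input
// region; AVG pooling sums the window and divides by the full window area, counting padded
// elements in the divisor.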
template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
    const int   pool_size     = info.pool_size();
    PoolingType type          = info.pool_type();
    int         pool_stride_x = info.pad_stride_info().stride().first;
    int         pool_stride_y = info.pad_stride_info().stride().second;
    int         pad_x         = info.pad_stride_info().pad().first;
    int         pad_y         = info.pad_stride_info().pad().second;

    const auto w_src      = static_cast<int>(src.shape()[0]);
    const auto h_src      = static_cast<int>(src.shape()[1]);
    const int  upper_dims = src.shape().total_size() / (w_src * h_src);

    // Create reference
    SimpleTensor<T> dst{ calculate_output_shape(src.shape(), info), src.data_type(), 1, src.fixed_point_position() };

    const auto w_dst = static_cast<int>(dst.shape()[0]);
    const auto h_dst = static_cast<int>(dst.shape()[1]);

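    // MAX pooling: for each output element, the window [wstart, wend) x [hstart, hend) is clamped
    // to the valid input region, so padded positions never contribute a candidate value.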
    if(type == PoolingType::MAX)
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < h_dst; ++h)
            {
                for(int w = 0; w < w_dst; ++w)
                {
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend   = std::min(wstart + pool_size, w_src);
                    int hend   = std::min(hstart + pool_size, h_src);
                    wstart     = std::max(wstart, 0);
                    hstart     = std::max(hstart, 0);

                    T max_val = std::numeric_limits<T>::lowest();
                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            const T val = src[r * h_src * w_src + y * w_src + x];
                            if(val > max_val)
                            {
                                max_val = val;
                            }
                        }
                    }

                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
                }
            }
        }
    }
    else // Average pooling
    {
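        // Average pooling: the divisor `pool` is the full (possibly padded) window area, computed
        // before the window is clamped to the valid region, so padded elements count as zeros in
        // the average (count-include-pad behaviour).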
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < h_dst; ++h)
            {
                for(int w = 0; w < w_dst; ++w)
                {
                    T   avg_val(0);
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend   = std::min(wstart + pool_size, w_src + pad_x);
                    int hend   = std::min(hstart + pool_size, h_src + pad_y);
                    int pool   = (hend - hstart) * (wend - wstart);
                    wstart     = std::max(wstart, 0);
                    hstart     = std::max(hstart, 0);
                    wend       = std::min(wend, w_src);
                    hend       = std::min(hend, h_src);

                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            avg_val += src[r * h_src * w_src + y * w_src + x];
                        }
                    }
                    dst[r * h_dst * w_dst + h * w_dst + w] = avg_val / pool;
                }
            }
        }
    }

    return dst;
}

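// Fixed-point reference: enabled for the quantized integer types (qint8_t / qint16_t). MAX pooling
// is identical to the floating-point path; AVG pooling accumulates in fixed_point<T> arithmetic and
// multiplies by the pre-computed reciprocal of the window area instead of dividing, so the result
// stays in the tensor's Q format.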
template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
{
    const int   pool_size     = info.pool_size();
    PoolingType type          = info.pool_type();
    int         pool_stride_x = info.pad_stride_info().stride().first;
    int         pool_stride_y = info.pad_stride_info().stride().second;
    int         pad_x         = info.pad_stride_info().pad().first;
    int         pad_y         = info.pad_stride_info().pad().second;

    const auto w_src      = static_cast<int>(src.shape()[0]);
    const auto h_src      = static_cast<int>(src.shape()[1]);
    const int  upper_dims = src.shape().total_size() / (w_src * h_src);

    // Create reference
    SimpleTensor<T> dst{ calculate_output_shape(src.shape(), info), src.data_type(), 1, src.fixed_point_position() };

    const auto w_dst = static_cast<int>(dst.shape()[0]);
    const auto h_dst = static_cast<int>(dst.shape()[1]);

    if(type == PoolingType::MAX)
    {
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < h_dst; ++h)
            {
                for(int w = 0; w < w_dst; ++w)
                {
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend   = std::min(wstart + pool_size, w_src);
                    int hend   = std::min(hstart + pool_size, h_src);
                    wstart     = std::max(wstart, 0);
                    hstart     = std::max(hstart, 0);

                    T max_val = std::numeric_limits<T>::lowest();
                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            const T val = src[r * h_src * w_src + y * w_src + x];
                            if(val > max_val)
                            {
                                max_val = val;
                            }
                        }
                    }

                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
                }
            }
        }
    }
    else // Average pooling
    {
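        // As in the floating-point path, the divisor is the full padded window area. The sum is
        // kept in fixed_point<T> and the division is performed as a fixed-point multiplication by
        // the reciprocal 1 / pool.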
        for(int r = 0; r < upper_dims; ++r)
        {
            for(int h = 0; h < h_dst; ++h)
            {
                for(int w = 0; w < w_dst; ++w)
                {
                    int wstart = w * pool_stride_x - pad_x;
                    int hstart = h * pool_stride_y - pad_y;
                    int wend   = std::min(wstart + pool_size, w_src + pad_x);
                    int hend   = std::min(hstart + pool_size, h_src + pad_y);
                    int pool   = (hend - hstart) * (wend - wstart);
                    wstart     = std::max(wstart, 0);
                    hstart     = std::max(hstart, 0);
                    wend       = std::min(wend, w_src);
                    hend       = std::min(hend, h_src);

                    using namespace fixed_point_arithmetic;

                    const int            fixed_point_position = src.fixed_point_position();
                    const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
                    fixed_point<T>       avg_val(0, fixed_point_position, true);

                    for(int y = hstart; y < hend; ++y)
                    {
                        for(int x = wstart; x < wend; ++x)
                        {
                            const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
                            avg_val = add(avg_val, in_fp);
                        }
                    }
                    dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
                }
            }
        }
    }

    return dst;
}

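// Minimal usage sketch (illustrative values only), assuming the
// PoolingLayerInfo(PoolingType, pool_size, PadStrideInfo) constructor:
//   PoolingLayerInfo    info(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0));
//   SimpleTensor<float> out = reference::pooling_layer(input, info);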
template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, PoolingLayerInfo info);
template SimpleTensor<half_float::half> pooling_layer(const SimpleTensor<half_float::half> &src, PoolingLayerInfo info);
template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, PoolingLayerInfo info);
template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, PoolingLayerInfo info);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute