/*
 * Copyright (c) 2021-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef SRC_CORE_NEON_KERNELS_QUANTIZED_H
#define SRC_CORE_NEON_KERNELS_QUANTIZED_H

#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"

#include "src/core/helpers/PoolingHelpers.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NEFixedPoint.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace cpu
{
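/** Generic MxN pooling (average or max) for quantized 8-bit tensors in NHWC layout.
 *
 * Channels are processed 16 at a time, with an additional 8-wide step for max pooling and a
 * scalar left-overs loop. Results are requantized when the source and destination
 * quantization infos differ. @p dst1 is unused.
 */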
template <typename T>
void poolingMxN_q8_neon_nhwc(const ITensor *src,
                             ITensor *dst0,
                             ITensor *dst1,
                             PoolingLayerInfo &pool_info,
                             const Window &window_src,
                             const Window &window)
{
    ARM_COMPUTE_UNUSED(dst1);

    const int window_start_x = window.x().start();
    const int window_end_x = window.x().end();
    const int window_step_x = 16;
    const int window_half_step_x = window_step_x / 2;

    Window window_out = window;
    window_out.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator in(src, window_src);
    Iterator out(dst0, window_out);

    using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
    using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
    using q16_t = typename wrapper::traits::promote_t<T>;
    using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
    using q32_t = typename wrapper::traits::promote_t<q16_t>;
    using q32x4_t = typename wrapper::traits::neon_vector<q32_t, 4>::type;

    const int pool_size_x = pool_info.is_global_pooling ? src->info()->tensor_shape().y() : pool_info.pool_size.width;
    const int pool_size_y = pool_info.is_global_pooling ? src->info()->tensor_shape().z() : pool_info.pool_size.height;
    const int pool_pad_right = pool_info.pad_stride_info.pad_right();
    const int pool_pad_top = pool_info.pad_stride_info.pad_top();
    const int pool_pad_left = pool_info.pad_stride_info.pad_left();
    const int pool_pad_bottom = pool_info.pad_stride_info.pad_bottom();

    int pool_stride_x = 0;
    int pool_stride_y = 0;
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
    const int upper_bound_w = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_right);
    const int upper_bound_h = src->info()->dimension(2) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);

    const float32x4_t half_scale_v = vdupq_n_f32(0.5f);
    const UniformQuantizationInfo src_qinfo = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo dst_qinfo = dst0->info()->quantization_info().uniform();

    const float quant_rescale = dst_qinfo.scale / src_qinfo.scale;
    // "new_offset" does not need to account for "half_scale_v": the requantization is
    // performed in a single step, so no additional rounding uncertainty is introduced.
    const int32_t new_offset =
        dst_qinfo.offset - static_cast<int32_t>(static_cast<float>(src_qinfo.offset) / quant_rescale);

    const float requant_scale = dst_qinfo.scale / src_qinfo.scale;
    const int32_t requant_offset =
        dst_qinfo.offset - static_cast<int32_t>(static_cast<float>(src_qinfo.offset) / requant_scale);
    const UniformQuantizationInfo requant_qinfo = UniformQuantizationInfo(requant_scale, requant_offset);

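    // Note: requant_scale / requant_offset fold the source and destination quantization into a
    // single step: quantizing a raw source value q with (requant_scale, requant_offset) yields
    // q / requant_scale + requant_offset == (q - src_offset) * src_scale / dst_scale + dst_offset
    // (up to rounding), so no intermediate dequantized value has to be materialized.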
    execute_window_loop(
        window_out,
        [&](const Coordinates &id)
        {
            const int idx_width = id.y() * pool_stride_x;
            const int idx_height = id.z() * pool_stride_y;
            const int pool_limit_y = pool_pad_top - idx_height;
            const int pool_limit_x = pool_pad_left - idx_width;

            const int pool_start_y = std::max(0, window_src.z().start() + pool_limit_y);
            const int pool_end_y = std::min(pool_size_y, window_src.z().end() + pool_limit_y);
            const int pool_start_x = std::max(0, window_src.y().start() + pool_limit_x);
            const int pool_end_x = std::min(pool_size_x, window_src.y().end() + pool_limit_x);

            int x_off = window_start_x;
            for (; x_off <= (window_end_x - window_step_x); x_off += window_step_x)
            {
                if (pool_info.pool_type != PoolingType::MAX)
                {
                    q32x4_t vres1 = wrapper::vdup_n(static_cast<q32_t>(0.f), wrapper::traits::vector_128_tag{});
                    q32x4_t vres2 = wrapper::vdup_n(static_cast<q32_t>(0.f), wrapper::traits::vector_128_tag{});
                    q32x4_t vres3 = wrapper::vdup_n(static_cast<q32_t>(0.f), wrapper::traits::vector_128_tag{});
                    q32x4_t vres4 = wrapper::vdup_n(static_cast<q32_t>(0.f), wrapper::traits::vector_128_tag{});

                    // Calculate scale
                    const float scale = calculate_avg_scale_pool2d(
                        pool_info.exclude_padding, DataLayout::NHWC, id, pool_size_x, pool_size_y, upper_bound_w,
                        upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x, pool_stride_y);

                    // Perform pooling
                    for (int y = pool_start_y; y < pool_end_y; ++y)
                    {
                        for (int x = pool_start_x; x < pool_end_x; ++x)
                        {
                            const q8x16_t data = wrapper::vloadq(
                                reinterpret_cast<const T *>(
                                    in.ptr() +
                                    (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().y()) +
                                    (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().z())) +
                                x_off);

                            const q16x8_t data_q16 = wrapper::vmovl(wrapper::vgetlow(data));
                            const q16x8_t data2_q16 = wrapper::vmovl(wrapper::vgethigh(data));
                            vres1 = wrapper::vadd(vres1, wrapper::vmovl(wrapper::vgetlow(data_q16)));
                            vres2 = wrapper::vadd(vres2, wrapper::vmovl(wrapper::vgethigh(data_q16)));
                            vres3 = wrapper::vadd(vres3, wrapper::vmovl(wrapper::vgetlow(data2_q16)));
                            vres4 = wrapper::vadd(vres4, wrapper::vmovl(wrapper::vgethigh(data2_q16)));
                        }
                    }

                    if (src_qinfo != dst_qinfo)
                    {
                        const float32x4x4_t vres = {{
                            vcvtq_f32_q32(vres1),
                            vcvtq_f32_q32(vres2),
                            vcvtq_f32_q32(vres3),
                            vcvtq_f32_q32(vres4),
                        }};
                        const auto requantized_dst =
                            vrequantize_pooling_with_scale<q8x16_t>(vres, quant_rescale, scale, new_offset);
                        // Store result
                        wrapper::vstore(reinterpret_cast<T *>(out.ptr()) + x_off, wrapper::vgetlow(requantized_dst));
                        wrapper::vstore(reinterpret_cast<T *>(out.ptr()) + x_off + 8,
                                        wrapper::vgethigh(requantized_dst));
                    }
                    else
                    {
                        const float32x4_t scale_v = vdupq_n_f32(scale);
                        // Multiply by scale (1 / pool area) and add 0.5f to round to nearest instead of towards zero
                        vres1 = vcvtq_q32_f32<q32x4_t>(wrapper::vmla(half_scale_v, vcvtq_f32_q32(vres1), scale_v));
                        vres2 = vcvtq_q32_f32<q32x4_t>(wrapper::vmla(half_scale_v, vcvtq_f32_q32(vres2), scale_v));
                        vres3 = vcvtq_q32_f32<q32x4_t>(wrapper::vmla(half_scale_v, vcvtq_f32_q32(vres3), scale_v));
                        vres4 = vcvtq_q32_f32<q32x4_t>(wrapper::vmla(half_scale_v, vcvtq_f32_q32(vres4), scale_v));

                        const q8x8_t res1 =
                            wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(vres1), wrapper::vmovn(vres2)));
                        const q8x8_t res2 =
                            wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(vres3), wrapper::vmovn(vres4)));
                        // Store result
                        wrapper::vstore(reinterpret_cast<T *>(out.ptr()) + x_off, res1);
                        wrapper::vstore(reinterpret_cast<T *>(out.ptr()) + x_off + 8, res2);
                    }
                }
                else
                {
                    q8x16_t vres = wrapper::vdup_n(std::numeric_limits<T>::min(), wrapper::traits::vector_128_tag{});

                    for (int y = pool_start_y; y < pool_end_y; ++y)
                    {
                        for (int x = pool_start_x; x < pool_end_x; ++x)
                        {
                            const q8x16_t data = wrapper::vloadq(
                                reinterpret_cast<const T *>(
                                    in.ptr() +
                                    (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().y()) +
                                    (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().z())) +
                                x_off);
                            vres = wrapper::vmax(vres, data);
                        }
                    }

                    // Store result
                    wrapper::vstore(reinterpret_cast<T *>(out.ptr()) + x_off,
                                    (src_qinfo != dst_qinfo)
                                        ? vrequantize_pooling<q8x8_t, q8x16_t>(wrapper::vgetlow(vres),
                                                                               wrapper::vgethigh(vres), requant_qinfo)
                                        : vres);
                }
            }

            if (pool_info.pool_type == PoolingType::MAX)
            {
                for (; x_off <= (window_end_x - window_half_step_x); x_off += window_half_step_x)
                {
                    q8x8_t vres = wrapper::vdup_n(std::numeric_limits<T>::min(), wrapper::traits::vector_64_tag{});
                    for (int y = pool_start_y; y < pool_end_y; ++y)
                    {
                        for (int x = pool_start_x; x < pool_end_x; ++x)
                        {
                            const q8x8_t data = wrapper::vload(
                                reinterpret_cast<const T *>(
                                    in.ptr() +
                                    (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().y()) +
                                    (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().z())) +
                                x_off);
                            vres = wrapper::vmax(vres, data);
                        }
                    }

                    // Store result
                    wrapper::vstore(reinterpret_cast<T *>(out.ptr()) + x_off,
                                    (src_qinfo != dst_qinfo) ? vrequantize_pooling<q8x8_t>(vres, requant_qinfo) : vres);
                }
            }

            // Left-overs loop
            for (; x_off < window_end_x; ++x_off)
            {
                if (pool_info.pool_type != PoolingType::MAX)
                {
                    q32_t res = static_cast<q32_t>(0.f);

                    // Calculate scale
                    const float scale = calculate_avg_scale_pool2d(
                        pool_info.exclude_padding, DataLayout::NHWC, id, pool_size_x, pool_size_y, upper_bound_w,
                        upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x, pool_stride_y);

                    // Perform pooling
                    for (int y = pool_start_y; y < pool_end_y; ++y)
                    {
                        for (int x = pool_start_x; x < pool_end_x; ++x)
                        {
                            const T data =
                                *(reinterpret_cast<const T *>(
                                      in.ptr() +
                                      (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().y()) +
                                      (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().z())) +
                                  x_off);
                            res += data;
                        }
                    }

                    if (src_qinfo != dst_qinfo)
                    {
                        const float res_f = static_cast<float>(res);
                        const float new_scale = quant_rescale / scale;
                        const auto requantized_dst = quantize<T>(res_f, UniformQuantizationInfo(new_scale, new_offset));

                        // Store result
                        *(reinterpret_cast<T *>(out.ptr()) + x_off) = requantized_dst;
                    }
                    else
                    {
                        // Multiply by scale (1 / pool area) and add 0.5f to round to nearest instead of towards zero
                        res = static_cast<T>(0.5f + static_cast<float>(res) * scale);

                        // Store result
                        *(reinterpret_cast<T *>(out.ptr()) + x_off) = res;
                    }
                }
                else
                {
                    T res = std::numeric_limits<T>::min();

                    for (int y = pool_start_y; y < pool_end_y; ++y)
                    {
                        for (int x = pool_start_x; x < pool_end_x; ++x)
                        {
                            const T data =
                                *(reinterpret_cast<const T *>(
                                      in.ptr() +
                                      (x - pool_pad_left) * static_cast<int>(src->info()->strides_in_bytes().y()) +
                                      (y - pool_pad_top) * static_cast<int>(src->info()->strides_in_bytes().z())) +
                                  x_off);
                            res = std::max(res, data);
                        }
                    }

                    // Store result
                    if (src_qinfo != dst_qinfo)
                    {
                        const float res_f = static_cast<float>(res);
                        *(reinterpret_cast<T *>(out.ptr()) + x_off) = quantize<T>(res_f, requant_qinfo);
                    }
                    else
                    {
                        *(reinterpret_cast<T *>(out.ptr()) + x_off) = res;
                    }
                }
            }
        },
        in, out);
}

#if defined(ENABLE_NCHW_KERNELS)
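/** Scale each lane of the 8-lane accumulator @p v by 1 / (pooling window area) of the
 * corresponding output element. The pooling window is clamped to the valid bounds and, when
 * @p exclude_padding is true, padded elements do not contribute to the divisor.
 */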
template <typename T, typename TVec>
inline void scale_vector_q16x8(bool exclude_padding,
                               TVec &v,
                               const Coordinates &id,
                               int id_offset,
                               int step,
                               const int pool_size,
                               const int upper_bound_w,
                               const int upper_bound_h,
                               const int pad_x,
                               const int pad_y,
                               const int stride_x,
                               const int stride_y)
{
    int start_x = (id.x() + id_offset) * stride_x - pad_x;
    int start_y = id.y() * stride_y - pad_y;
    const int end_y = std::min(start_y + pool_size, upper_bound_h);
    if (exclude_padding)
    {
        start_y = std::max(0, start_y);
    }

    std::array<T, 8> elems = {{
        wrapper::vgetlane(v, 0),
        wrapper::vgetlane(v, 1),
        wrapper::vgetlane(v, 2),
        wrapper::vgetlane(v, 3),
        wrapper::vgetlane(v, 4),
        wrapper::vgetlane(v, 5),
        wrapper::vgetlane(v, 6),
        wrapper::vgetlane(v, 7),
    }};

    for (auto &el : elems)
    {
        int c_start_x = start_x;
        const int end_x = std::min(c_start_x + pool_size, upper_bound_w);
        if (exclude_padding)
        {
            c_start_x = std::max(0, c_start_x);
        }
        float scale = 1.f / ((end_y - start_y) * (end_x - c_start_x));
        el *= scale;
        start_x += step * stride_x;
    }

    v = wrapper::vsetlane(elems[0], v, 0);
    v = wrapper::vsetlane(elems[1], v, 1);
    v = wrapper::vsetlane(elems[2], v, 2);
    v = wrapper::vsetlane(elems[3], v, 3);
    v = wrapper::vsetlane(elems[4], v, 4);
    v = wrapper::vsetlane(elems[5], v, 5);
    v = wrapper::vsetlane(elems[6], v, 6);
    v = wrapper::vsetlane(elems[7], v, 7);
}

template <typename T>
auto load16_boundary_aware(
    int srcw, int srch, int pad_l, int pad_r, int pad_t, int pad_b, int x, int y, const T *ptr, T fval)
{
    ARM_COMPUTE_UNUSED(pad_b, pad_r);
    T vec[16];
    // Handle reading a row out of the tensor
    const bool row_in_bounds((y >= pad_t) && (y < (srch + pad_t)));
    for (int i = 0; i < 16; i++)
    {
        if (row_in_bounds && (x + i >= pad_l) && (x + i < (srcw + pad_l)))
        {
            vec[i] = *(ptr + i);
        }
        else
        {
            vec[i] = fval;
        }
    }
    return wrapper::vloadq(vec);
}

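/** Store two 8-lane result vectors without writing beyond @p dst_w output elements.
 * When @p deinterleave is true, @p lower holds the even output columns and @p upper the odd
 * ones, and the two are interleaved on store; otherwise they are stored back to back.
 */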
template <typename T, typename V, bool deinterleave>
inline void write16_boundary_aware(int x, int dst_w, const V &lower, const V &upper, T *ptr)
{
    if (deinterleave)
    {
        for (int i = 0; i < 8 && (i * 2 + x) < dst_w; ++i)
        {
            *(ptr + i * 2) = lower[i];
        }
        for (int i = 0; i < 8 && (i * 2 + x + 1) < dst_w; ++i)
        {
            *(ptr + 1 + i * 2) = upper[i];
        }
    }
    else
    {
        for (int i = 0; i < 8 && (i + x) < dst_w; ++i)
        {
            *(ptr + i) = lower[i];
        }
        for (int i = 0; i < 8 && (i + x + 8) < dst_w; ++i)
        {
            *(ptr + i + 8) = upper[i];
        }
    }
}

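/** Store up to 8 results from @p v, stopping at the destination width @p dst_w. */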
template <typename T, typename V>
inline void write8_boundary_aware(int x, int dst_w, const V &v, T *ptr)
{
    for (int i = 0; i < 8 && (i + x) < dst_w; ++i)
    {
        *(ptr + i) = v[i];
    }
}

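/** 2x2 pooling (average or max) for quantized 8-bit tensors in NCHW layout.
 *
 * Two input rows are loaded 16 elements at a time; for unit stride along x a second (odd
 * column) result vector is computed from the shifted row data. Results are requantized when
 * the source and destination quantization infos differ. @p dst1 is unused.
 */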
template <typename T>
void pooling2_quantized_neon_nchw(const ITensor *src,
                                  ITensor *dst0,
                                  ITensor *dst1,
                                  PoolingLayerInfo &pool_info,
                                  const Window &window_src,
                                  const Window &window)
{
    ARM_COMPUTE_UNUSED(dst1);
    Iterator in(src, window_src);
    Iterator out(dst0, window);

    /** SIMD vector types */
    using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
    using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
    using q16_t = typename wrapper::traits::promote_t<T>;
    using q16x4_t = typename wrapper::traits::neon_vector<q16_t, 4>::type;
    using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
    using q16x8x2_t = typename wrapper::traits::neon_vector<q16_t, 16>::type;

    constexpr int pool_size = 2;
    int pool_stride_x = 0;
    int pool_stride_y = 0;
    const int pool_pad_right = pool_info.pad_stride_info.pad_right();
    const int pool_pad_top = pool_info.pad_stride_info.pad_top();
    const int pool_pad_left = pool_info.pad_stride_info.pad_left();
    const int pool_pad_bottom = pool_info.pad_stride_info.pad_bottom();
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);
    const T *const src_top_ptr = reinterpret_cast<const T *>(
        src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top))));
    const T *const src_bottom_ptr = reinterpret_cast<const T *>(
        src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1)));
    const int scale_step_x = (pool_stride_x == 1) ? 2 : 1;
    const UniformQuantizationInfo src_qinfo = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo dst_qinfo = dst0->info()->quantization_info().uniform();
    const bool have_different_qinfo = src_qinfo != dst_qinfo;

    const float requant_scale = dst_qinfo.scale / src_qinfo.scale;
    const int32_t requant_offset =
        dst_qinfo.offset - static_cast<int32_t>(static_cast<float>(src_qinfo.offset) / requant_scale);
    const UniformQuantizationInfo requant_qinfo = UniformQuantizationInfo(requant_scale, requant_offset);
    const int src_w = src->info()->dimension(0);
    const int src_h = src->info()->dimension(1);
    const int dst_w = dst0->info()->dimension(0);

    const T fill_value = (pool_info.pool_type == PoolingType::MAX) ? std::numeric_limits<T>::min() : T(0);

    execute_window_loop(
        window,
        [&](const Coordinates &id)
        {
            const auto x_val = id.x() * pool_stride_x;
            const auto y_val_0 = id.y() * pool_stride_y;
            const auto y_val_1 = (id.y() * pool_stride_y) + 1;

            auto top_data =
                load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom, x_val,
                                      y_val_0, reinterpret_cast<const T *>(src_top_ptr + in.offset()), fill_value);
            auto bottom_data =
                load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom, x_val,
                                      y_val_1, reinterpret_cast<const T *>(src_bottom_ptr + in.offset()), fill_value);

            q8x8_t lower_res = {};
            q8x8_t upper_res = {};

            if (pool_info.pool_type != PoolingType::MAX)
            {
                const q16x8x2_t top_data_q16 = {
                    {wrapper::vmovl(wrapper::vgetlow(top_data)), wrapper::vmovl(wrapper::vgethigh(top_data))}};
                const q16x8x2_t bottom_data_q16 = {
                    {wrapper::vmovl(wrapper::vgetlow(bottom_data)), wrapper::vmovl(wrapper::vgethigh(bottom_data))}};

                // Add rows
                const q16x8x2_t vrsum = {{
                    wrapper::vadd(top_data_q16.val[0], bottom_data_q16.val[0]),
                    wrapper::vadd(top_data_q16.val[1], bottom_data_q16.val[1]),
                }};

                // Pair-wise add row data
                const q16x4_t vpsum_1 = wrapper::vpadd(wrapper::vgetlow(vrsum.val[0]), wrapper::vgethigh(vrsum.val[0]));
                const q16x4_t vpsum_2 = wrapper::vpadd(wrapper::vgetlow(vrsum.val[1]), wrapper::vgethigh(vrsum.val[1]));

                q16x8_t res_lower = wrapper::vcombine(vpsum_1, vpsum_2);

                // Scale lower result
                scale_vector_q16x8<q16_t, q16x8_t>(pool_info.exclude_padding, res_lower, id, 0, scale_step_x, pool_size,
                                                   upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top,
                                                   pool_stride_x, pool_stride_y);
                lower_res = wrapper::vmovn(res_lower);

                // Compute upper result for stride_x == 1
                if (pool_stride_x == 1)
                {
                    // Shifted row sum
                    const q16x8x2_t vrsum_shifted = {
                        {wrapper::vext_1(vrsum.val[0], vrsum.val[1]), wrapper::vext_1(vrsum.val[1], vrsum.val[1])}};

                    // Pair-wise add shifted row
                    q16x8_t res_upper = wrapper::vcombine(
                        wrapper::vpadd(wrapper::vgetlow(vrsum_shifted.val[0]), wrapper::vgethigh(vrsum_shifted.val[0])),
                        wrapper::vpadd(wrapper::vgetlow(vrsum_shifted.val[1]),
                                       wrapper::vgethigh(vrsum_shifted.val[1])));

                    // Scale upper result
                    scale_vector_q16x8<q16_t, q16x8_t>(pool_info.exclude_padding, res_upper, id, 1, 2, pool_size,
                                                       upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top,
                                                       pool_stride_x, pool_stride_y);
                    upper_res = wrapper::vmovn(res_upper);
                }
            }
            else
            {
                const q8x16_t max_data = wrapper::vmax(top_data, bottom_data);
                lower_res = wrapper::vpmax(wrapper::vgetlow(max_data), wrapper::vgethigh(max_data));
                if (pool_stride_x == 1)
                {
                    const q8x16_t max_data_shifted = wrapper::vext_1(max_data, max_data);
                    upper_res = wrapper::vpmax(wrapper::vgetlow(max_data_shifted), wrapper::vgethigh(max_data_shifted));
                }
            }

            if (have_different_qinfo)
            {
                const auto requantized_dst = vrequantize_pooling<q8x8_t, q8x16_t>(lower_res, upper_res, requant_qinfo);
                lower_res = wrapper::vgetlow(requantized_dst);
                upper_res = wrapper::vgethigh(requantized_dst);
            }
            auto out_ptr = reinterpret_cast<T *>(out.ptr());
            // Store result
            if (pool_stride_x == 1)
            {
                write16_boundary_aware<T, q8x8_t, true>(id.x(), dst_w, lower_res, upper_res, out_ptr);
            }
            else
            {
                write8_boundary_aware<T, q8x8_t>(id.x(), dst_w, lower_res, out_ptr);
            }
        },
        in, out);
}

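/** 3x3 pooling (average or max) for quantized 8-bit tensors in NCHW layout.
 *
 * Three input rows are loaded 16 elements at a time and combined with shifted copies of the
 * row sums (or row maxima) to form the 3-wide horizontal reduction. Results are requantized
 * when the source and destination quantization infos differ. @p dst1 is unused.
 */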
template <typename T>
void pooling3_quantized_neon_nchw(const ITensor *src,
                                  ITensor *dst0,
                                  ITensor *dst1,
                                  PoolingLayerInfo &pool_info,
                                  const Window &window_src,
                                  const Window &window)
{
    ARM_COMPUTE_UNUSED(dst1);
    Iterator in(src, window_src);
    Iterator out(dst0, window);

    /** SIMD vector types */
    using q8x8_t = typename wrapper::traits::neon_vector<T, 8>::type;
    using q8x16_t = typename wrapper::traits::neon_vector<T, 16>::type;
    using q8x8x2_t = typename std::conditional<std::is_same<T, uint8_t>::value, uint8x8x2_t, int8x8x2_t>::type;
    using q16_t = typename wrapper::traits::promote_t<T>;
    using q16x8_t = typename wrapper::traits::neon_vector<q16_t, 8>::type;
    using q16x8x2_t = typename wrapper::traits::neon_vector<q16_t, 16>::type;

    constexpr int pool_size = 3;
    const int pool_pad_right = pool_info.pad_stride_info.pad_right();
    const int pool_pad_top = pool_info.pad_stride_info.pad_top();
    const int pool_pad_left = pool_info.pad_stride_info.pad_left();
    const int pool_pad_bottom = pool_info.pad_stride_info.pad_bottom();
    int pool_stride_x = 0;
    int pool_stride_y = 0;
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);

    const UniformQuantizationInfo &src_qinfo = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo &dst_qinfo = dst0->info()->quantization_info().uniform();

    const float requant_scale = dst_qinfo.scale / src_qinfo.scale;
    const int32_t requant_offset =
        dst_qinfo.offset - static_cast<int32_t>(static_cast<float>(src_qinfo.offset) / requant_scale);
    const UniformQuantizationInfo requant_qinfo = UniformQuantizationInfo(requant_scale, requant_offset);

    const T *const src_top_ptr = reinterpret_cast<const T *>(
        src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top))));
    const T *const src_middle_ptr = reinterpret_cast<const T *>(
        src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 1)));
    const T *const src_bottom_ptr = reinterpret_cast<const T *>(
        src->ptr_to_element(Coordinates(-static_cast<int>(pool_pad_left), -static_cast<int>(pool_pad_top) + 2)));

    const int src_w = src->info()->dimension(0);
    const int src_h = src->info()->dimension(1);
    const T fill_value = (pool_info.pool_type == PoolingType::AVG) ? T(0) : std::numeric_limits<T>::min();
    const int dst_w = dst0->info()->dimension(0);

    execute_window_loop(
        window,
        [&](const Coordinates &id)
        {
            const auto x_val = id.x() * pool_stride_x;
            const auto y_val_0 = id.y() * pool_stride_y;
            const auto y_val_1 = (id.y() * pool_stride_y) + 1;
            const auto y_val_2 = (id.y() * pool_stride_y) + 2;

            auto top_data =
                load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom, x_val,
                                      y_val_0, reinterpret_cast<const T *>(src_top_ptr + in.offset()), fill_value);
            auto middle_data =
                load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom, x_val,
                                      y_val_1, reinterpret_cast<const T *>(src_middle_ptr + in.offset()), fill_value);
            auto bottom_data =
                load16_boundary_aware(src_w, src_h, pool_pad_left, pool_pad_right, pool_pad_top, pool_pad_bottom, x_val,
                                      y_val_2, reinterpret_cast<const T *>(src_bottom_ptr + in.offset()), fill_value);

            q8x8_t fres = {};
            q8x16_t fqres = {};

            if (pool_info.pool_type == PoolingType::AVG)
            {
                // Convert data to u16
                const q16x8x2_t top_data_q16 = {
                    {wrapper::vmovl(wrapper::vgetlow(top_data)), wrapper::vmovl(wrapper::vgethigh(top_data))}};
                const q16x8x2_t middle_data_q16 = {
                    {wrapper::vmovl(wrapper::vgetlow(middle_data)), wrapper::vmovl(wrapper::vgethigh(middle_data))}};
                const q16x8x2_t bottom_data_q16 = {
                    {wrapper::vmovl(wrapper::vgetlow(bottom_data)), wrapper::vmovl(wrapper::vgethigh(bottom_data))}};

                // Calculate row sums
                const q16x8x2_t vrsum = {{
                    wrapper::vadd(wrapper::vadd(top_data_q16.val[0], bottom_data_q16.val[0]), middle_data_q16.val[0]),
                    wrapper::vadd(wrapper::vadd(top_data_q16.val[1], bottom_data_q16.val[1]), middle_data_q16.val[1]),
                }};
                const q16x8x2_t vrsum_shifted_1 = {
                    {wrapper::vext_1(vrsum.val[0], vrsum.val[1]), wrapper::vext_1(vrsum.val[1], vrsum.val[1])}};
                const q16x8x2_t vrsum_shifted_2 = {
                    {wrapper::vext_2(vrsum.val[0], vrsum.val[1]), wrapper::vext_2(vrsum.val[1], vrsum.val[1])}};
                // Calculate final sum
                q16x8x2_t final_sum = {{
                    wrapper::vadd(wrapper::vadd(vrsum.val[0], vrsum_shifted_1.val[0]), vrsum_shifted_2.val[0]),
                    wrapper::vadd(wrapper::vadd(vrsum.val[1], vrsum_shifted_1.val[1]), vrsum_shifted_2.val[1]),
                }};
                if (pool_stride_x == 2)
                {
                    q16x8_t res = {
                        wrapper::vgetlane(final_sum.val[0], 0), wrapper::vgetlane(final_sum.val[0], 2),
                        wrapper::vgetlane(final_sum.val[0], 4), wrapper::vgetlane(final_sum.val[0], 6),
                        wrapper::vgetlane(final_sum.val[1], 0), wrapper::vgetlane(final_sum.val[1], 2),
                        wrapper::vgetlane(final_sum.val[1], 4), wrapper::vgetlane(final_sum.val[1], 6),
                    };

                    scale_vector_q16x8<q16_t, q16x8_t>(pool_info.exclude_padding, res, id, 0, 1, pool_size,
                                                       upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top,
                                                       pool_stride_x, pool_stride_y);
                    fres = wrapper::vmovn(res);
                }
                else
                {
                    // Scale lower result
                    scale_vector_q16x8<q16_t, q16x8_t>(pool_info.exclude_padding, final_sum.val[0], id, 0, 1, pool_size,
                                                       upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top,
                                                       pool_stride_x, pool_stride_y);
                    // Scale upper result
                    scale_vector_q16x8<q16_t, q16x8_t>(pool_info.exclude_padding, final_sum.val[1], id, 8, 1, pool_size,
                                                       upper_bound_w, upper_bound_h, pool_pad_left, pool_pad_top,
                                                       pool_stride_x, pool_stride_y);
                    fqres = wrapper::vcombine(wrapper::vmovn(final_sum.val[0]), wrapper::vmovn(final_sum.val[1]));
                }
            }
            else
            {
                const q8x16_t max_data = wrapper::vmax(wrapper::vmax(top_data, bottom_data), middle_data);
                const q8x16_t max_data_shift1 = wrapper::vext_1(max_data, max_data);
                const q8x16_t max_data_shift2 = wrapper::vext_2(max_data, max_data);
                const q8x16_t final_max = wrapper::vmax(wrapper::vmax(max_data, max_data_shift1), max_data_shift2);

                if (pool_stride_x == 2)
                {
                    const q8x8x2_t table = {{wrapper::vgetlow(final_max), wrapper::vgethigh(final_max)}};
                    static const q8x8_t lookup_val = {0, 2, 4, 6, 8, 10, 12, 14};
                    fres = wrapper::vtbl(table, lookup_val);
                }
                else
                {
                    fqres = final_max;
                }
            }

            // Store result
            if (pool_stride_x == 1)
            {
                if (src_qinfo != dst_qinfo)
                {
                    fqres = vrequantize_pooling<q8x8_t, q8x16_t>(wrapper::vgetlow(fqres), wrapper::vgethigh(fqres),
                                                                 requant_qinfo);
                }
                write16_boundary_aware<T, q8x8_t, false>(id.x(), dst_w, wrapper::vgetlow(fqres),
                                                         wrapper::vgethigh(fqres), reinterpret_cast<T *>(out.ptr()));
            }
            else
            {
                if (src_qinfo != dst_qinfo)
                {
                    fres = vrequantize_pooling<q8x8_t>(fres, requant_qinfo);
                }
                write8_boundary_aware<T, q8x8_t>(id.x(), dst_w, fres, reinterpret_cast<T *>(out.ptr()));
            }
        },
        in, out);
}

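/** Generic MxN pooling (average or max) for quantized 8-bit tensors in NCHW layout.
 *
 * Scalar implementation: each output element accumulates (or takes the maximum over) its
 * pooling window, substituting a fill value for padded positions, and is requantized if the
 * source and destination quantization infos differ. @p dst1 is unused.
 */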
template <typename T>
void poolingMxN_quantized_neon_nchw(const ITensor *src,
                                    ITensor *dst0,
                                    ITensor *dst1,
                                    PoolingLayerInfo &pool_info,
                                    const Window &window_src,
                                    const Window &window)
{
    ARM_COMPUTE_UNUSED(dst1);
    Iterator in(src, window_src);
    Iterator out(dst0, window);

    /** SIMD vector types */
    using q16_t = typename wrapper::traits::promote_t<T>;
    using q32_t = typename wrapper::traits::promote_t<q16_t>;

    const int pool_size_x = pool_info.is_global_pooling ? src->info()->tensor_shape().x() : pool_info.pool_size.width;
    const int pool_size_y = pool_info.is_global_pooling ? src->info()->tensor_shape().y() : pool_info.pool_size.height;
    const int pool_pad_right = pool_info.pad_stride_info.pad_right();
    const int pool_pad_top = pool_info.pad_stride_info.pad_top();
    const int pool_pad_left = pool_info.pad_stride_info.pad_left();
    const int pool_pad_bottom = pool_info.pad_stride_info.pad_bottom();
    int pool_stride_x = 0;
    int pool_stride_y = 0;
    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info.stride();
    const int upper_bound_w = src->info()->dimension(0) + (pool_info.exclude_padding ? 0 : pool_pad_right);
    const int upper_bound_h = src->info()->dimension(1) + (pool_info.exclude_padding ? 0 : pool_pad_bottom);

    const UniformQuantizationInfo &src_qinfo = src->info()->quantization_info().uniform();
    const UniformQuantizationInfo &dst_qinfo = dst0->info()->quantization_info().uniform();
    const int src_w = src->info()->dimension(0);
    const int src_h = src->info()->dimension(1);
    const T fill_value = (pool_info.pool_type == PoolingType::AVG) ? T(0) : std::numeric_limits<T>::min();
    const int stridex_in_bytes = static_cast<int>(src->info()->strides_in_bytes().x());
    const int stridey_in_bytes = static_cast<int>(src->info()->strides_in_bytes().y());

    execute_window_loop(
        window,
        [&](const Coordinates &id)
        {
            T res = std::numeric_limits<T>::min();

            if (pool_info.pool_type != PoolingType::MAX)
            {
                q32_t sres = 0;

                // Calculate scale
                const float scale = calculate_avg_scale_pool2d(
                    pool_info.exclude_padding, DataLayout::NCHW, id, pool_size_x, pool_size_y, upper_bound_w,
                    upper_bound_h, pool_pad_left, pool_pad_top, pool_stride_x, pool_stride_y);

                // Perform pooling
                for (int y = 0; y < pool_size_y; ++y)
                {
                    for (int x = 0; x < pool_size_x; ++x)
                    {
                        const auto in_ptr = reinterpret_cast<const T *>(
                            in.ptr() + (x - pool_pad_left) * stridex_in_bytes + (y - pool_pad_top) * stridey_in_bytes);

                        const int idx = x + id.x() * pool_stride_x - pool_pad_left;
                        const int idy = y + id.y() * pool_stride_y - pool_pad_top;
                        const T data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *in_ptr;
                        sres += data;
                    }
                }
                // Multiply by scale (1 / pool area) and round to nearest
                res = static_cast<T>(support::cpp11::round(sres * scale));
            }
            else
            {
                for (int y = 0; y < pool_size_y; ++y)
                {
                    for (int x = 0; x < pool_size_x; ++x)
                    {
                        const auto in_ptr = reinterpret_cast<const T *>(
                            in.ptr() + (x - pool_pad_left) * stridex_in_bytes + (y - pool_pad_top) * stridey_in_bytes);

                        const int idx = x + id.x() * pool_stride_x - pool_pad_left;
                        const int idy = y + id.y() * pool_stride_y - pool_pad_top;
                        const T data = (idx < 0 || idy < 0 || idx >= src_w || idy >= src_h) ? fill_value : *in_ptr;
                        res = std::max(res, data);
                    }
                }
            }
            // Store result
            res = (src_qinfo != dst_qinfo) ? Qasymm8QuantizationHelper<T>::quantize(
                                                 Qasymm8QuantizationHelper<T>::dequantize(res, src_qinfo), dst_qinfo)
                                           : res;
            *(reinterpret_cast<T *>(out.ptr())) = res;
        },
        in, out);
}
#endif /* defined(ENABLE_NCHW_KERNELS) */
} // namespace cpu
} // namespace arm_compute

#endif // SRC_CORE_NEON_KERNELS_QUANTIZED_H