/*
 * Copyright (c) 2016, 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Validate.h"

#include <cmath>
#include <numeric>

namespace arm_compute
{
inline uint8_t pixel_area_c1u8_clamp(const uint8_t *first_pixel_ptr, size_t stride, size_t width, size_t height, float wr, float hr, int x, int y)
{
    ARM_COMPUTE_ERROR_ON(first_pixel_ptr == nullptr);

    // Calculate sampling position
    float in_x = (x + 0.5f) * wr - 0.5f;
    float in_y = (y + 0.5f) * hr - 0.5f;

    // Get bounding box offsets
    int x_from = std::floor(x * wr - 0.5f - in_x);
    int y_from = std::floor(y * hr - 0.5f - in_y);
    int x_to   = std::ceil((x + 1) * wr - 0.5f - in_x);
    int y_to   = std::ceil((y + 1) * hr - 0.5f - in_y);

    // Clamp position to borders
    in_x = std::max(-1.f, std::min(in_x, static_cast<float>(width)));
    in_y = std::max(-1.f, std::min(in_y, static_cast<float>(height)));

    // Clamp bounding box offsets to borders
    x_from = ((in_x + x_from) < -1) ? -1 : x_from;
    y_from = ((in_y + y_from) < -1) ? -1 : y_from;
    x_to   = ((in_x + x_to) > width) ? (width - in_x) : x_to;
    y_to   = ((in_y + y_to) > height) ? (height - in_y) : y_to;

    // Get pixel index
    const int xi = std::floor(in_x);
    const int yi = std::floor(in_y);

    // Bounding box elements in each dimension
    const int x_elements = (x_to - x_from + 1);
    const int y_elements = (y_to - y_from + 1);
    ARM_COMPUTE_ERROR_ON(x_elements == 0 || y_elements == 0);

    // Sum pixels in area
    int sum = 0;
    for(int j = yi + y_from, je = yi + y_to; j <= je; ++j)
    {
        const uint8_t *ptr = first_pixel_ptr + j * stride + xi + x_from;
        sum                = std::accumulate(ptr, ptr + x_elements, sum);
    }

    // Return average
    return sum / (x_elements * y_elements);
}
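//
// Illustrative sketch (not part of the library; `src`, `stride`, `width`, `height`,
// `dst_x` and `dst_y` are assumed caller-side values): for a 2x AREA downscale the
// ratios are wr == hr == 2.0f, and each destination pixel becomes the average of the
// source pixels its bounding box covers:
//
//   const uint8_t avg = pixel_area_c1u8_clamp(src, stride, width, height, 2.0f, 2.0f, dst_x, dst_y);
//
// Note that the clamped bounding box may extend one pixel outside [0, width) x [0, height),
// so the source image is expected to carry valid border padding.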

// Increments each of the given iterators along the given dimension (one recursion step per iterator)
template <size_t dimension>
struct IncrementIterators
{
    template <typename T, typename... Ts>
    static void unroll(T &&it, Ts &&... iterators)
    {
        it.increment(dimension);
        IncrementIterators<dimension>::unroll<Ts...>(std::forward<Ts>(iterators)...);
    }

    template <typename T>
    static void unroll(T &&it)
    {
        it.increment(dimension);
        // End of recursion
    }

    static void unroll()
    {
        // End of recursion
    }
};

// Unrolls the nested loops over every window dimension, from the highest dimension down to
// dimension 0, and invokes the lambda once per window coordinate
template <size_t dim>
struct ForEachDimension
{
    template <typename L, typename... Ts>
    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&... iterators)
    {
        const auto &d = w[dim - 1];

        for(auto v = d.start(); v < d.end(); v += d.step(), IncrementIterators < dim - 1 >::unroll(iterators...))
        {
            id.set(dim - 1, v);
            ForEachDimension < dim - 1 >::unroll(w, id, lambda_function, iterators...);
        }
    }
};

template <>
struct ForEachDimension<0>
{
    template <typename L, typename... Ts>
    static void unroll(const Window &w, Coordinates &id, L &&lambda_function, Ts &&... iterators)
    {
        lambda_function(id);
    }
};

template <typename L, typename... Ts>
inline void execute_window_loop(const Window &w, L &&lambda_function, Ts &&... iterators)
{
    w.validate();

    Coordinates id;
    ForEachDimension<Coordinates::num_max_dimensions>::unroll(w, id, std::forward<L>(lambda_function), std::forward<Ts>(iterators)...);
}
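//
// Minimal usage sketch (illustrative only; `tensor` is an assumed ITensor* with a
// U8 element type). The window spans the first two dimensions and the iterator is
// advanced in lock-step with it, so the lambda can write through it.ptr() directly:
//
//   Window win;
//   win.set(Window::DimX, Window::Dimension(0, tensor->info()->dimension(0), 1));
//   win.set(Window::DimY, Window::Dimension(0, tensor->info()->dimension(1), 1));
//
//   Iterator it(tensor, win);
//   execute_window_loop(win, [&](const Coordinates &id)
//   {
//       *it.ptr() = 0; // it.ptr() already points at the element addressed by id
//   },
//   it);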

inline constexpr Iterator::Iterator()
    : _ptr(nullptr), _dims()
{
}

inline Iterator::Iterator(const ITensor *tensor, const Window &win)
    : Iterator()
{
    ARM_COMPUTE_ERROR_ON(tensor == nullptr);
    const ITensorInfo *info = tensor->info();
    ARM_COMPUTE_ERROR_ON(info == nullptr);
    const Strides &strides = info->strides_in_bytes();

    _ptr = tensor->buffer() + info->offset_first_element_in_bytes();

    // Initialize the stride for each dimension and calculate the position of the first element of the iteration:
    for(unsigned int n = 0; n < info->num_dimensions(); ++n)
    {
        _dims[n]._stride = win[n].step() * strides[n];
        std::get<0>(_dims)._dim_start += strides[n] * win[n].start();
    }

    // Copy the starting point to all the dimensions:
    for(unsigned int n = 1; n < Coordinates::num_max_dimensions; ++n)
    {
        _dims[n]._dim_start = std::get<0>(_dims)._dim_start;
    }

    ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(win, info->num_dimensions());
}

inline void Iterator::increment(const size_t dimension)
{
    ARM_COMPUTE_ERROR_ON(dimension >= Coordinates::num_max_dimensions);

    _dims[dimension]._dim_start += _dims[dimension]._stride;

    for(unsigned int n = 0; n < dimension; ++n)
    {
        _dims[n]._dim_start = _dims[dimension]._dim_start;
    }
}

inline constexpr int Iterator::offset() const
{
    return _dims.at(0)._dim_start;
}

inline constexpr uint8_t *Iterator::ptr() const
{
    return _ptr + _dims.at(0)._dim_start;
}

inline void Iterator::reset(const size_t dimension)
{
    ARM_COMPUTE_ERROR_ON(dimension >= Coordinates::num_max_dimensions - 1);

    _dims[dimension]._dim_start = _dims[dimension + 1]._dim_start;

    for(unsigned int n = 0; n < dimension; ++n)
    {
        _dims[n]._dim_start = _dims[dimension]._dim_start;
    }
}

inline bool auto_init_if_empty(ITensorInfo &info,
                               const TensorShape &shape,
                               int num_channels,
                               DataType data_type,
                               int fixed_point_position,
                               QuantizationInfo quantization_info)
{
    if(info.tensor_shape().total_size() == 0)
    {
        info.set_data_type(data_type);
        info.set_num_channels(num_channels);
        info.set_tensor_shape(shape);
        info.set_fixed_point_position(fixed_point_position);
        info.set_quantization_info(quantization_info);
        return true;
    }

    return false;
}
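//
// Typical use in a kernel's configure() step (sketch; `input` and `output` are assumed
// ITensor pointers): forward the input's metadata to an output the caller left empty.
//
//   auto_init_if_empty(*output->info(),
//                      input->info()->tensor_shape(),
//                      1,
//                      input->info()->data_type(),
//                      input->info()->fixed_point_position(),
//                      input->info()->quantization_info());
//
// Returns true if the ITensorInfo was auto-initialized here, false if it already had a
// shape and was left untouched.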

inline bool set_shape_if_empty(ITensorInfo &info, const TensorShape &shape)
{
    if(info.tensor_shape().total_size() == 0)
    {
        info.set_tensor_shape(shape);
        return true;
    }

    return false;
}

inline bool set_format_if_unknown(ITensorInfo &info, Format format)
{
    if(info.data_type() == DataType::UNKNOWN)
    {
        info.set_format(format);
        return true;
    }

    return false;
}

inline bool set_data_type_if_unknown(ITensorInfo &info, DataType data_type)
{
    if(info.data_type() == DataType::UNKNOWN)
    {
        info.set_data_type(data_type);
        return true;
    }

    return false;
}

inline bool set_fixed_point_position_if_zero(ITensorInfo &info, int fixed_point_position)
{
    if(info.fixed_point_position() == 0 && (info.data_type() == DataType::QS8 || info.data_type() == DataType::QS16))
    {
        info.set_fixed_point_position(fixed_point_position);
        return true;
    }

    return false;
}

inline bool set_quantization_info_if_empty(ITensorInfo &info, QuantizationInfo quantization_info)
{
    if(info.quantization_info().empty() && (is_data_type_assymetric(info.data_type())))
    {
        info.set_quantization_info(quantization_info);
        return true;
    }

    return false;
}

inline ValidRegion calculate_valid_region_scale(const ITensorInfo &src_info, const TensorShape &dst_shape, InterpolationPolicy policy, BorderSize border_size, bool border_undefined)
{
    const auto wr = static_cast<float>(dst_shape[0]) / static_cast<float>(src_info.tensor_shape()[0]);
    const auto hr = static_cast<float>(dst_shape[1]) / static_cast<float>(src_info.tensor_shape()[1]);
    Coordinates anchor;
    anchor.set_num_dimensions(src_info.tensor_shape().num_dimensions());
    TensorShape new_dst_shape(dst_shape);
    anchor.set(0, (policy == InterpolationPolicy::BILINEAR
                   && border_undefined) ?
                  ((static_cast<int>(src_info.valid_region().anchor[0]) + border_size.left + 0.5f) * wr - 0.5f) :
                  ((static_cast<int>(src_info.valid_region().anchor[0]) + 0.5f) * wr - 0.5f));
    anchor.set(1, (policy == InterpolationPolicy::BILINEAR
                   && border_undefined) ?
                  ((static_cast<int>(src_info.valid_region().anchor[1]) + border_size.top + 0.5f) * hr - 0.5f) :
                  ((static_cast<int>(src_info.valid_region().anchor[1]) + 0.5f) * hr - 0.5f));
    float shape_out_x = (policy == InterpolationPolicy::BILINEAR
                         && border_undefined) ?
                        ((static_cast<int>(src_info.valid_region().anchor[0]) + static_cast<int>(src_info.valid_region().shape[0]) - 1) - 1 + 0.5f) * wr - 0.5f :
                        ((static_cast<int>(src_info.valid_region().anchor[0]) + static_cast<int>(src_info.valid_region().shape[0])) + 0.5f) * wr - 0.5f;
    float shape_out_y = (policy == InterpolationPolicy::BILINEAR
                         && border_undefined) ?
                        ((static_cast<int>(src_info.valid_region().anchor[1]) + static_cast<int>(src_info.valid_region().shape[1]) - 1) - 1 + 0.5f) * hr - 0.5f :
                        ((static_cast<int>(src_info.valid_region().anchor[1]) + static_cast<int>(src_info.valid_region().shape[1])) + 0.5f) * hr - 0.5f;

    new_dst_shape.set(0, shape_out_x - anchor[0]);
    new_dst_shape.set(1, shape_out_y - anchor[1]);

    return ValidRegion(std::move(anchor), std::move(new_dst_shape));
}

inline Coordinates index2coords(const TensorShape &shape, int index)
{
    int num_elements = shape.total_size();

    ARM_COMPUTE_ERROR_ON_MSG(index < 0 || index >= num_elements, "Index has to be in [0, num_elements)!");
    ARM_COMPUTE_ERROR_ON_MSG(num_elements == 0, "Cannot create coordinate from empty shape!");

    Coordinates coord{ 0 };

    for(int d = shape.num_dimensions() - 1; d >= 0; --d)
    {
        num_elements /= shape[d];
        coord.set(d, index / num_elements);
        index %= num_elements;
    }

    return coord;
}

inline int coords2index(const TensorShape &shape, const Coordinates &coord)
{
    int num_elements = shape.total_size();
    ARM_COMPUTE_UNUSED(num_elements);
    ARM_COMPUTE_ERROR_ON_MSG(num_elements == 0, "Cannot create linear index from empty shape!");

    int index  = 0;
    int stride = 1;

    for(unsigned int d = 0; d < coord.num_dimensions(); ++d)
    {
        index += coord[d] * stride;
        stride *= shape[d];
    }

    return index;
}
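//
// Worked round trip (sketch): for TensorShape(4, 3, 2), i.e. 4 elements along X,
// 3 along Y and 2 along Z, dimension 0 varies fastest, so
//
//   coords2index(shape, Coordinates(1, 2, 1)) == 1 + 2 * 4 + 1 * (4 * 3) == 21
//   index2coords(shape, 21)                   == Coordinates(1, 2, 1)
//
// The two helpers are inverses of each other for any index in [0, shape.total_size()).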
} // namespace arm_compute