/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_UTILS_H__
#define __ARM_COMPUTE_TEST_UTILS_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/HOGInfo.h"
#include "arm_compute/core/PyramidInfo.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "support/ToolchainSupport.h"

#ifdef ARM_COMPUTE_CL
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#endif /* ARM_COMPUTE_CL */

#ifdef ARM_COMPUTE_GC
#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#endif /* ARM_COMPUTE_GC */

#include <cmath>
#include <cstddef>
#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>

#include "arm_compute/runtime/CPP/CPPScheduler.h"
#include "arm_compute/runtime/RuntimeContext.h"

namespace arm_compute
{
#ifdef ARM_COMPUTE_CL
class CLTensor;
#endif /* ARM_COMPUTE_CL */
namespace test
{
/** Round floating-point value with half value rounding to positive infinity.
 *
 * @param[in] value floating-point value to be rounded.
 *
 * @return Floating-point value of rounded @p value.
 */
template <typename T, typename = typename std::enable_if<std::is_floating_point<T>::value>::type>
inline T round_half_up(T value)
{
    return std::floor(value + 0.5f);
}

/** Round floating-point value with half value rounding to nearest even.
 *
 * @param[in] value   floating-point value to be rounded.
 * @param[in] epsilon precision.
 *
 * @return Floating-point value of rounded @p value.
 */
template <typename T, typename = typename std::enable_if<std::is_floating_point<T>::value>::type>
inline T round_half_even(T value, T epsilon = std::numeric_limits<T>::epsilon())
{
    T positive_value = std::abs(value);
    T ipart          = 0;
    std::modf(positive_value, &ipart);
    // If 'value' is exactly halfway between two integers
    if(std::abs(positive_value - (ipart + 0.5f)) < epsilon)
    {
        // If 'ipart' is even then return 'ipart'
        if(std::fmod(ipart, 2.f) < epsilon)
        {
            return support::cpp11::copysign(ipart, value);
        }
        // Else return the nearest even integer
        return support::cpp11::copysign(std::ceil(ipart + 0.5f), value);
    }
    // Otherwise use the usual round to closest
    return support::cpp11::copysign(support::cpp11::round(positive_value), value);
}

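// Usage sketch (illustrative only, following from the definitions above):
//   round_half_up(2.5f)    -> 3.0f  (ties are rounded towards +infinity)
//   round_half_up(-2.5f)   -> -2.0f
//   round_half_even(2.5f)  -> 2.0f  (ties are rounded to the nearest even integer)
//   round_half_even(3.5f)  -> 4.0f
//   round_half_even(-2.5f) -> -2.0f (the sign of the input is preserved)
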
namespace traits
{
// *INDENT-OFF*
// clang-format off
/** Promote a type */
template <typename T> struct promote { };
/** Promote uint8_t to uint16_t */
template <> struct promote<uint8_t> { using type = uint16_t; /**< Promoted type */ };
/** Promote int8_t to int16_t */
template <> struct promote<int8_t> { using type = int16_t; /**< Promoted type */ };
/** Promote uint16_t to uint32_t */
template <> struct promote<uint16_t> { using type = uint32_t; /**< Promoted type */ };
/** Promote int16_t to int32_t */
template <> struct promote<int16_t> { using type = int32_t; /**< Promoted type */ };
/** Promote uint32_t to uint64_t */
template <> struct promote<uint32_t> { using type = uint64_t; /**< Promoted type */ };
/** Promote int32_t to int64_t */
template <> struct promote<int32_t> { using type = int64_t; /**< Promoted type */ };
/** Promote float to float */
template <> struct promote<float> { using type = float; /**< Promoted type */ };
/** Promote half to half */
template <> struct promote<half> { using type = half; /**< Promoted type */ };

/** Get promoted type */
template <typename T>
using promote_t = typename promote<T>::type;

template <typename T>
using make_signed_conditional_t = typename std::conditional<std::is_integral<T>::value, std::make_signed<T>, std::common_type<T>>::type;

template <typename T>
using make_unsigned_conditional_t = typename std::conditional<std::is_integral<T>::value, std::make_unsigned<T>, std::common_type<T>>::type;

// clang-format on
// *INDENT-ON*
}

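// Usage sketch (illustrative only): promote maps each integer type to a wider type of the same
// signedness, while floating-point types map to themselves. For example:
//   static_assert(std::is_same<traits::promote_t<uint8_t>, uint16_t>::value, "");
//   static_assert(std::is_same<traits::make_signed_conditional_t<uint16_t>::type, int16_t>::value, "");
//   static_assert(std::is_same<traits::make_signed_conditional_t<float>::type, float>::value, "");
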
/** Look up the format corresponding to a channel.
 *
 * @param[in] channel Channel type.
 *
 * @return Format that contains the given channel.
 */
inline Format get_format_for_channel(Channel channel)
{
    switch(channel)
    {
        case Channel::R:
        case Channel::G:
        case Channel::B:
            return Format::RGB888;
        default:
            throw std::runtime_error("Unsupported channel");
    }
}

/** Return the format of a channel.
 *
 * @param[in] channel Channel type.
 *
 * @return Format of the given channel.
 */
inline Format get_channel_format(Channel channel)
{
    switch(channel)
    {
        case Channel::R:
        case Channel::G:
        case Channel::B:
            return Format::U8;
        default:
            throw std::runtime_error("Unsupported channel");
    }
}

/** Base case of foldl.
 *
 * @return value.
 */
template <typename F, typename T>
inline T foldl(F &&, const T &value)
{
    return value;
}

/** Base case of foldl.
 *
 * @return func(value1, value2).
 */
template <typename F, typename T, typename U>
inline auto foldl(F &&func, T &&value1, U &&value2) -> decltype(func(value1, value2))
{
    return func(value1, value2);
}

/** Fold left.
 *
 * @param[in] func    Binary function to be called.
 * @param[in] initial Initial value.
 * @param[in] value   Argument passed to the function.
 * @param[in] values  Remaining arguments.
 */
template <typename F, typename I, typename T, typename... Vs>
inline I foldl(F &&func, I &&initial, T &&value, Vs &&... values)
{
    return foldl(std::forward<F>(func), func(std::forward<I>(initial), std::forward<T>(value)), std::forward<Vs>(values)...);
}

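// Usage sketch (illustrative only): foldl applies the binary function from left to right.
//   const auto add = [](int a, int b) { return a + b; };
//   const int  sum = foldl(add, 1, 2, 3, 4); // sum == 10, i.e. ((1 + 2) + 3) + 4
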
/** Create a valid region based on tensor shape, border mode and border size.
 *
 * @param[in] a_shape          Shape used as size of the valid region.
 * @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
 * @param[in] border_size      (Optional) Border size used to specify the region to exclude.
 *
 * @return A valid region starting at (0, 0, ...) with the size of @p a_shape if @p border_undefined is false; otherwise
 *         a valid region starting at (@p border_size.left, @p border_size.top, ...) with the size of @p a_shape reduced by the border.
 */
inline ValidRegion shape_to_valid_region(const TensorShape &a_shape, bool border_undefined = false, BorderSize border_size = BorderSize(0))
{
    ValidRegion valid_region{ Coordinates(), a_shape };

    Coordinates &anchor = valid_region.anchor;
    TensorShape &shape  = valid_region.shape;

    if(border_undefined)
    {
        ARM_COMPUTE_ERROR_ON(shape.num_dimensions() < 2);

        anchor.set(0, border_size.left);
        anchor.set(1, border_size.top);

        const int valid_shape_x = std::max(0, static_cast<int>(shape.x()) - static_cast<int>(border_size.left) - static_cast<int>(border_size.right));
        const int valid_shape_y = std::max(0, static_cast<int>(shape.y()) - static_cast<int>(border_size.top) - static_cast<int>(border_size.bottom));

        shape.set(0, valid_shape_x);
        shape.set(1, valid_shape_y);
    }

    return valid_region;
}

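// Usage sketch (illustrative only, values follow from the computation above):
//   shape_to_valid_region(TensorShape(16U, 16U))                      -> anchor (0, 0), shape (16, 16)
//   shape_to_valid_region(TensorShape(16U, 16U), true, BorderSize(1)) -> anchor (1, 1), shape (14, 14)
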
/** Create a valid region for Gaussian Pyramid Half based on tensor shape and valid region at level "i - 1" and border mode
 *
 * @note The border size is 2 in case of Gaussian Pyramid Half
 *
 * @param[in] a_shape          Shape used at level "i - 1" of Gaussian Pyramid Half
 * @param[in] a_valid_region   Valid region used at level "i - 1" of Gaussian Pyramid Half
 * @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
 *
 * @return The valid region for the level "i" of Gaussian Pyramid Half
 */
inline ValidRegion shape_to_valid_region_gaussian_pyramid_half(const TensorShape &a_shape, const ValidRegion &a_valid_region, bool border_undefined = false)
{
    constexpr int border_size = 2;

    ValidRegion valid_region{ Coordinates(), a_shape };

    Coordinates &anchor = valid_region.anchor;
    TensorShape &shape  = valid_region.shape;

    // Compute tensor shape for level "i" of Gaussian Pyramid Half
    // dst_width  = (src_width + 1) * 0.5f
    // dst_height = (src_height + 1) * 0.5f
    shape.set(0, (a_shape[0] + 1) * 0.5f);
    shape.set(1, (a_shape[1] + 1) * 0.5f);

    if(border_undefined)
    {
        ARM_COMPUTE_ERROR_ON(shape.num_dimensions() < 2);

        // Compute the left and top invalid borders
        float invalid_border_left = static_cast<float>(a_valid_region.anchor.x() + border_size) / 2.0f;
        float invalid_border_top  = static_cast<float>(a_valid_region.anchor.y() + border_size) / 2.0f;

        // For the new anchor point we can have 2 cases:
        // 1) If the width/height of the tensor shape is odd, we have to take the ceil value of (a_valid_region.anchor.x() + border_size) / 2.0f or (a_valid_region.anchor.y() + border_size) / 2.0f
        // 2) If the width/height of the tensor shape is even, we have to take the floor value of (a_valid_region.anchor.x() + border_size) / 2.0f or (a_valid_region.anchor.y() + border_size) / 2.0f
        // In this manner we should be able to propagate correctly the valid region along all levels of the pyramid
        invalid_border_left = (a_shape[0] % 2) ? std::ceil(invalid_border_left) : std::floor(invalid_border_left);
        invalid_border_top  = (a_shape[1] % 2) ? std::ceil(invalid_border_top) : std::floor(invalid_border_top);

        // Set the anchor point
        anchor.set(0, static_cast<int>(invalid_border_left));
        anchor.set(1, static_cast<int>(invalid_border_top));

        // Compute shape
        // Calculate the right and bottom invalid borders at the previous level of the pyramid
        const float prev_invalid_border_right  = static_cast<float>(a_shape[0] - (a_valid_region.anchor.x() + a_valid_region.shape[0]));
        const float prev_invalid_border_bottom = static_cast<float>(a_shape[1] - (a_valid_region.anchor.y() + a_valid_region.shape[1]));

        // Calculate the right and bottom invalid borders at the current level of the pyramid
        const float invalid_border_right  = std::ceil((prev_invalid_border_right + static_cast<float>(border_size)) / 2.0f);
        const float invalid_border_bottom = std::ceil((prev_invalid_border_bottom + static_cast<float>(border_size)) / 2.0f);

        const int valid_shape_x = std::max(0, static_cast<int>(shape.x()) - static_cast<int>(invalid_border_left) - static_cast<int>(invalid_border_right));
        const int valid_shape_y = std::max(0, static_cast<int>(shape.y()) - static_cast<int>(invalid_border_top) - static_cast<int>(invalid_border_bottom));

        shape.set(0, valid_shape_x);
        shape.set(1, valid_shape_y);
    }

    return valid_region;
}

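// Usage sketch (illustrative only): starting from a 64x64 level "i - 1" whose whole area is valid,
// the level "i" shape becomes (64 + 1) * 0.5 = 32 per dimension; with border_undefined == true the
// anchor moves to (1, 1) and the valid shape shrinks to 32 - 1 - 1 = 30 per dimension.
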
/** Create a valid region for Laplacian Pyramid based on tensor shape and valid region at level "i - 1" and border mode
 *
 * @note The border size is 2 in case of Laplacian Pyramid
 *
 * @param[in] a_shape          Shape used at level "i - 1" of Laplacian Pyramid
 * @param[in] a_valid_region   Valid region used at level "i - 1" of Laplacian Pyramid
 * @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
 *
 * @return The valid region for the level "i" of Laplacian Pyramid
 */
inline ValidRegion shape_to_valid_region_laplacian_pyramid(const TensorShape &a_shape, const ValidRegion &a_valid_region, bool border_undefined = false)
{
    ValidRegion valid_region = shape_to_valid_region_gaussian_pyramid_half(a_shape, a_valid_region, border_undefined);

    if(border_undefined)
    {
        const BorderSize gaussian5x5_border(2);

        auto border_left   = static_cast<int>(gaussian5x5_border.left);
        auto border_right  = static_cast<int>(gaussian5x5_border.right);
        auto border_top    = static_cast<int>(gaussian5x5_border.top);
        auto border_bottom = static_cast<int>(gaussian5x5_border.bottom);

        valid_region.anchor.set(0, valid_region.anchor[0] + border_left);
        valid_region.anchor.set(1, valid_region.anchor[1] + border_top);
        valid_region.shape.set(0, std::max(0, static_cast<int>(valid_region.shape[0]) - border_right - border_left));
        valid_region.shape.set(1, std::max(0, static_cast<int>(valid_region.shape[1]) - border_top - border_bottom));
    }

    return valid_region;
}

/** Write the value after casting the pointer according to @p data_type.
 *
 * @warning The type of the value must match the specified data type.
 *
 * @param[out] ptr       Pointer to memory where the @p value will be written.
 * @param[in]  value     Value that will be written.
 * @param[in]  data_type Data type that will be written.
 */
template <typename T>
void store_value_with_data_type(void *ptr, T value, DataType data_type)
{
    switch(data_type)
    {
        case DataType::U8:
        case DataType::QASYMM8:
        case DataType::QASYMM8_PER_CHANNEL:
            *reinterpret_cast<uint8_t *>(ptr) = value;
            break;
        case DataType::S8:
        case DataType::QSYMM8:
        case DataType::QSYMM8_PER_CHANNEL:
            *reinterpret_cast<int8_t *>(ptr) = value;
            break;
        case DataType::U16:
        case DataType::QASYMM16:
            *reinterpret_cast<uint16_t *>(ptr) = value;
            break;
        case DataType::S16:
        case DataType::QSYMM16:
            *reinterpret_cast<int16_t *>(ptr) = value;
            break;
        case DataType::U32:
            *reinterpret_cast<uint32_t *>(ptr) = value;
            break;
        case DataType::S32:
            *reinterpret_cast<int32_t *>(ptr) = value;
            break;
        case DataType::U64:
            *reinterpret_cast<uint64_t *>(ptr) = value;
            break;
        case DataType::S64:
            *reinterpret_cast<int64_t *>(ptr) = value;
            break;
        case DataType::F16:
            *reinterpret_cast<half *>(ptr) = value;
            break;
        case DataType::F32:
            *reinterpret_cast<float *>(ptr) = value;
            break;
        case DataType::F64:
            *reinterpret_cast<double *>(ptr) = value;
            break;
        case DataType::SIZET:
            *reinterpret_cast<size_t *>(ptr) = value;
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

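// Usage sketch (illustrative only): the pointee type must match the requested DataType.
//   uint8_t u8_buffer[1];
//   store_value_with_data_type(u8_buffer, 200, DataType::U8);    // writes a uint8_t
//   float f32_buffer[1];
//   store_value_with_data_type(f32_buffer, 1.5f, DataType::F32); // writes a float
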
/** Saturate a value of type T against the numeric limits of type U.
 *
 * @param[in] val Value to be saturated.
 *
 * @return saturated value.
 */
template <typename U, typename T>
T saturate_cast(T val)
{
    if(val > static_cast<T>(std::numeric_limits<U>::max()))
    {
        val = static_cast<T>(std::numeric_limits<U>::max());
    }
    if(val < static_cast<T>(std::numeric_limits<U>::lowest()))
    {
        val = static_cast<T>(std::numeric_limits<U>::lowest());
    }
    return val;
}

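// Usage sketch (illustrative only): the value is clamped to U's range but keeps its original type T.
//   saturate_cast<uint8_t>(300) -> 255
//   saturate_cast<uint8_t>(-5)  -> 0
//   saturate_cast<int8_t>(200)  -> 127
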
/** Find the signed promoted common type.
 */
template <typename... T>
struct common_promoted_signed_type
{
    /** Common type */
    using common_type = typename std::common_type<T...>::type;
    /** Promoted type */
    using promoted_type = traits::promote_t<common_type>;
    /** Intermediate type */
    using intermediate_type = typename traits::make_signed_conditional_t<promoted_type>::type;
};

/** Find the unsigned promoted common type.
 */
template <typename... T>
struct common_promoted_unsigned_type
{
    /** Common type */
    using common_type = typename std::common_type<T...>::type;
    /** Promoted type */
    using promoted_type = traits::promote_t<common_type>;
    /** Intermediate type */
    using intermediate_type = typename traits::make_unsigned_conditional_t<promoted_type>::type;
};

/** Convert a linear index into n-dimensional coordinates.
 *
 * @param[in] shape Shape of the n-dimensional tensor.
 * @param[in] index Linear index specifying the i-th element.
 *
 * @return n-dimensional coordinates.
 */
inline Coordinates index2coord(const TensorShape &shape, int index)
{
    int num_elements = shape.total_size();

    ARM_COMPUTE_ERROR_ON_MSG(index < 0 || index >= num_elements, "Index has to be in [0, num_elements)");
    ARM_COMPUTE_ERROR_ON_MSG(num_elements == 0, "Cannot create coordinate from empty shape");

    Coordinates coord{ 0 };

    for(int d = shape.num_dimensions() - 1; d >= 0; --d)
    {
        num_elements /= shape[d];
        coord.set(d, index / num_elements);
        index %= num_elements;
    }

    return coord;
}

/** Linearise the given coordinate.
 *
 * Transforms the given coordinate into a linear offset in terms of
 * elements.
 *
 * @param[in] shape Shape of the n-dimensional tensor.
 * @param[in] coord Coordinate to be converted.
 *
 * @return Linear offset to the element.
 */
inline int coord2index(const TensorShape &shape, const Coordinates &coord)
{
    ARM_COMPUTE_ERROR_ON_MSG(shape.total_size() == 0, "Cannot get index from empty shape");
    ARM_COMPUTE_ERROR_ON_MSG(coord.num_dimensions() == 0, "Cannot get index of empty coordinate");

    int index    = 0;
    int dim_size = 1;

    for(unsigned int i = 0; i < coord.num_dimensions(); ++i)
    {
        index += coord[i] * dim_size;
        dim_size *= shape[i];
    }

    return index;
}

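// Usage sketch (illustrative only): index2coord and coord2index are inverses of each other.
//   const TensorShape shape(4U, 3U);                     // 12 elements, dimension 0 is contiguous
//   const Coordinates coord = index2coord(shape, 7);     // coord == (3, 1)
//   const int         index = coord2index(shape, coord); // index == 7
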
/** Check if a coordinate is within a valid region */
inline bool is_in_valid_region(const ValidRegion &valid_region, Coordinates coord)
{
    for(size_t d = 0; d < Coordinates::num_max_dimensions; ++d)
    {
        if(coord[d] < valid_region.start(d) || coord[d] >= valid_region.end(d))
        {
            return false;
        }
    }

    return true;
}

/** Create and initialize a tensor of the given type.
 *
 * @param[in] shape             Tensor shape.
 * @param[in] data_type         Data type.
 * @param[in] num_channels      (Optional) Number of channels.
 * @param[in] quantization_info (Optional) Quantization info for asymmetric quantized types.
 * @param[in] data_layout       (Optional) Data layout. Default is NCHW.
 * @param[in] ctx               (Optional) Pointer to the runtime context.
 *
 * @return Initialized tensor of given type.
 */
template <typename T>
inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1,
                       QuantizationInfo quantization_info = QuantizationInfo(), DataLayout data_layout = DataLayout::NCHW, IRuntimeContext *ctx = nullptr)
{
    T          tensor(ctx);
    TensorInfo info(shape, num_channels, data_type);
    info.set_quantization_info(quantization_info);
    info.set_data_layout(data_layout);
    tensor.allocator()->init(info);

    return tensor;
}

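// Usage sketch (illustrative only; assumes the NEON runtime type arm_compute::Tensor and the
// QuantizationInfo(scale, offset) constructor):
//   Tensor fp_tensor = create_tensor<Tensor>(TensorShape(32U, 32U, 3U), DataType::F32);
//   Tensor q_tensor  = create_tensor<Tensor>(TensorShape(32U, 32U), DataType::QASYMM8, 1,
//                                            QuantizationInfo(0.5f, 10), DataLayout::NHWC);
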
/** Create and initialize a tensor of the given type.
 *
 * @param[in] shape  Tensor shape.
 * @param[in] format Format type.
 * @param[in] ctx    (Optional) Pointer to the runtime context.
 *
 * @return Initialized tensor of given type.
 */
template <typename T>
inline T create_tensor(const TensorShape &shape, Format format, IRuntimeContext *ctx = nullptr)
{
    TensorInfo info(shape, format);

    T tensor(ctx);
    tensor.allocator()->init(info);

    return tensor;
}

/** Create and initialize a multi-image of the given type.
 *
 * @param[in] shape  Tensor shape.
 * @param[in] format Format type.
 *
 * @return Initialized multi-image of given type.
 */
template <typename T>
inline T create_multi_image(const TensorShape &shape, Format format)
{
    T multi_image;
    multi_image.init(shape.x(), shape.y(), format);

    return multi_image;
}

/** Create and initialize a HOG (Histogram of Oriented Gradients) of the given type.
 *
 * @param[in] hog_info HOGInfo object
 *
 * @return Initialized HOG of given type.
 */
template <typename T>
inline T create_HOG(const HOGInfo &hog_info)
{
    T hog;
    hog.init(hog_info);

    return hog;
}

/** Create and initialize a Pyramid of the given type.
 *
 * @param[in] pyramid_info The PyramidInfo object.
 *
 * @return Initialized Pyramid of given type.
 */
template <typename T>
inline T create_pyramid(const PyramidInfo &pyramid_info)
{
    T pyramid;
    pyramid.init_auto_padding(pyramid_info);

    return pyramid;
}

/** Initialize a convolution matrix.
 *
 * @param[in, out] conv   The input convolution matrix.
 * @param[in]      width  The width of the convolution matrix.
 * @param[in]      height The height of the convolution matrix.
 * @param[in]      seed   The random seed to be used.
 */
inline void init_conv(int16_t *conv, unsigned int width, unsigned int height, std::random_device::result_type seed)
{
    std::mt19937                           gen(seed);
    std::uniform_int_distribution<int16_t> distribution_int16(-32768, 32767);

    for(unsigned int i = 0; i < width * height; ++i)
    {
        conv[i] = distribution_int16(gen);
    }
}

/** Initialize a separable convolution matrix.
 *
 * @param[in, out] conv   The input convolution matrix.
 * @param[in]      width  The width of the convolution matrix.
 * @param[in]      height The height of the convolution matrix.
 * @param[in]      seed   The random seed to be used.
 */
inline void init_separable_conv(int16_t *conv, unsigned int width, unsigned int height, std::random_device::result_type seed)
{
    std::mt19937 gen(seed);
    // Keep the values between -128 and 127 to ensure the matrix does not overflow
    std::uniform_int_distribution<int16_t> distribution_int16(-128, 127);

    int16_t *conv_row = new int16_t[width];
    int16_t *conv_col = new int16_t[height];

    conv_row[0] = conv_col[0] = 1;
    for(unsigned int i = 1; i < width; ++i)
    {
        conv_row[i] = distribution_int16(gen);
    }

    for(unsigned int i = 1; i < height; ++i)
    {
        conv_col[i] = distribution_int16(gen);
    }

    // Multiply the column vector by the row vector to form the separable matrix
    for(unsigned int i = 0; i < width; ++i)
    {
        for(unsigned int j = 0; j < height; ++j)
        {
            conv[i * width + j] = conv_col[i] * conv_row[j];
        }
    }

    delete[] conv_row;
    delete[] conv_col;
}

/** Create a vector with a uniform distribution of floating point values across the specified range.
 *
 * @param[in] num_values The number of values to be created.
 * @param[in] min        The minimum value in distribution (inclusive).
 * @param[in] max        The maximum value in distribution (inclusive).
 * @param[in] seed       The random seed to be used.
 *
 * @return A vector that contains the requested number of random floating point values
 */
template <typename T, typename = typename std::enable_if<std::is_floating_point<T>::value>::type>
inline std::vector<T> generate_random_real(unsigned int num_values, T min, T max, std::random_device::result_type seed)
{
    std::vector<T>                    v(num_values);
    std::mt19937                      gen(seed);
    std::uniform_real_distribution<T> dist(min, max);

    for(unsigned int i = 0; i < num_values; ++i)
    {
        v.at(i) = dist(gen);
    }

    return v;
}

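// Usage sketch (illustrative only):
//   std::random_device rd;
//   std::vector<float> weights = generate_random_real(16U, -1.f, 1.f, rd());
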
/** Create a vector of random keypoints for pyramid representation.
 *
 * @param[in] shape         The shape of the input tensor.
 * @param[in] num_keypoints The number of keypoints to be created.
 * @param[in] seed          The random seed to be used.
 * @param[in] num_levels    The number of pyramid levels.
 *
 * @return A vector that contains the requested number of random keypoints
 */
inline std::vector<KeyPoint> generate_random_keypoints(const TensorShape &shape, size_t num_keypoints, std::random_device::result_type seed, size_t num_levels = 1)
{
    std::vector<KeyPoint> keypoints;
    std::mt19937          gen(seed);

    // Calculate distribution bounds
    const auto min        = static_cast<int>(std::pow(2, num_levels));
    const auto max_width  = static_cast<int>(shape.x());
    const auto max_height = static_cast<int>(shape.y());

    ARM_COMPUTE_ERROR_ON(min > max_width || min > max_height);

    // Create distributions
    std::uniform_int_distribution<> dist_w(min, max_width);
    std::uniform_int_distribution<> dist_h(min, max_height);

    for(unsigned int i = 0; i < num_keypoints; i++)
    {
        KeyPoint keypoint;
        keypoint.x               = dist_w(gen);
        keypoint.y               = dist_h(gen);
        keypoint.tracking_status = 1;

        keypoints.push_back(keypoint);
    }

    return keypoints;
}

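/** Copy the content of a vector into an array.
 *
 * @param[in, out] array Array accessor used to resize the array and write to its buffer.
 * @param[in]      v     Vector of values to be copied into the array.
 */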
template <typename T, typename ArrayAccessor_T>
inline void fill_array(ArrayAccessor_T &&array, const std::vector<T> &v)
{
    array.resize(v.size());
    std::memcpy(array.buffer(), v.data(), v.size() * sizeof(T));
}

/** Obtain numpy type string from DataType.
 *
 * @param[in] data_type Data type.
 *
 * @return numpy type string.
 */
inline std::string get_typestring(DataType data_type)
{
    // Check endianness
    const unsigned int i = 1;
    const char        *c = reinterpret_cast<const char *>(&i);
    std::string        endianness;
    if(*c == 1)
    {
        endianness = std::string("<");
    }
    else
    {
        endianness = std::string(">");
    }
    const std::string no_endianness("|");

    switch(data_type)
    {
        case DataType::U8:
            return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
        case DataType::S8:
            return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
        case DataType::U16:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
        case DataType::S16:
            return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
        case DataType::U32:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
        case DataType::S32:
            return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
        case DataType::U64:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
        case DataType::S64:
            return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
        case DataType::F32:
            return endianness + "f" + support::cpp11::to_string(sizeof(float));
        case DataType::F64:
            return endianness + "f" + support::cpp11::to_string(sizeof(double));
        case DataType::SIZET:
            return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

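// Usage sketch (illustrative only): on a little-endian machine
//   get_typestring(DataType::F32) -> "<f4"
//   get_typestring(DataType::U8)  -> "|u1"
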
/** Sync if necessary.
 */
template <typename TensorType>
inline void sync_if_necessary()
{
#ifdef ARM_COMPUTE_CL
    if(opencl_is_available() && std::is_same<typename std::decay<TensorType>::type, arm_compute::CLTensor>::value)
    {
        CLScheduler::get().sync();
    }
#endif /* ARM_COMPUTE_CL */
}

/** Sync tensor if necessary.
 *
 * @note If the destination tensor is not used on OpenGL ES, the GPU will optimize the operation out.
 *
 * @param[in] tensor Tensor to be synced.
 */
template <typename TensorType>
inline void sync_tensor_if_necessary(TensorType &tensor)
{
#ifdef ARM_COMPUTE_GC
    if(opengles31_is_available() && std::is_same<typename std::decay<TensorType>::type, arm_compute::GCTensor>::value)
    {
        // Force sync the tensor by calling map and unmap.
        IGCTensor &t = dynamic_cast<IGCTensor &>(tensor);
        t.map();
        t.unmap();
    }
#else  /* ARM_COMPUTE_GC */
    ARM_COMPUTE_UNUSED(tensor);
#endif /* ARM_COMPUTE_GC */
}
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_UTILS_H__ */