/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_UTILS_H__
#define __ARM_COMPUTE_TEST_UTILS_H__

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/FixedPoint.h"
#include "arm_compute/core/HOGInfo.h"
#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "support/ToolchainSupport.h"

#ifdef ARM_COMPUTE_CL
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#endif /* ARM_COMPUTE_CL */

#ifdef ARM_COMPUTE_GC
#include "arm_compute/core/GLES_COMPUTE/OpenGLES.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#endif /* ARM_COMPUTE_GC */

#include <cmath>
#include <cstddef>
#include <cstring> // for std::memcpy used by fill_array
#include <limits>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>

namespace arm_compute
{
#ifdef ARM_COMPUTE_CL
class CLTensor;
#endif /* ARM_COMPUTE_CL */
namespace test
{
/** Round floating-point value with half value rounding to positive infinity.
 *
 * @param[in] value floating-point value to be rounded.
 *
 * @return Floating-point value of rounded @p value.
 */
template <typename T, typename = typename std::enable_if<std::is_floating_point<T>::value>::type>
inline T round_half_up(T value)
{
    return std::floor(value + 0.5f);
}
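
// A couple of illustrative values for the rounding helper above (not exhaustive):
//   round_half_up(2.5f)  -> 3.0f
//   round_half_up(-2.5f) -> -2.0f   (the tie is rounded towards positive infinity)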

/** Round floating-point value with half value rounding to nearest even.
 *
 * @param[in] value   floating-point value to be rounded.
 * @param[in] epsilon precision.
 *
 * @return Floating-point value of rounded @p value.
 */
template <typename T, typename = typename std::enable_if<std::is_floating_point<T>::value>::type>
inline T round_half_even(T value, T epsilon = std::numeric_limits<T>::epsilon())
{
    T positive_value = std::abs(value);
    T ipart          = 0;
    std::modf(positive_value, &ipart);
    // If 'value' is exactly halfway between two integers
    if(std::abs(positive_value - (ipart + 0.5f)) < epsilon)
    {
        // If 'ipart' is even then return 'ipart'
        if(std::fmod(ipart, 2.f) < epsilon)
        {
            return support::cpp11::copysign(ipart, value);
        }
        // Else return the nearest even integer
        return support::cpp11::copysign(std::ceil(ipart + 0.5f), value);
    }
    // Otherwise use the usual round to closest
    return support::cpp11::copysign(support::cpp11::round(positive_value), value);
}
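
// Illustrative values for round-half-to-even (banker's rounding):
//   round_half_even(2.5f) -> 2.0f   (2 is the nearest even integer)
//   round_half_even(3.5f) -> 4.0f
//   round_half_even(2.3f) -> 2.0f   (not a tie, so normal rounding applies)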

namespace traits
{
// *INDENT-OFF*
// clang-format off
template <typename T> struct promote { };
template <> struct promote<uint8_t> { using type = uint16_t; };
template <> struct promote<int8_t> { using type = int16_t; };
template <> struct promote<uint16_t> { using type = uint32_t; };
template <> struct promote<int16_t> { using type = int32_t; };
template <> struct promote<uint32_t> { using type = uint64_t; };
template <> struct promote<int32_t> { using type = int64_t; };
template <> struct promote<float> { using type = float; };
template <> struct promote<half> { using type = half; };


template <typename T>
using promote_t = typename promote<T>::type;

template <typename T>
using make_signed_conditional_t = typename std::conditional<std::is_integral<T>::value, std::make_signed<T>, std::common_type<T>>::type;

template <typename T>
using make_unsigned_conditional_t = typename std::conditional<std::is_integral<T>::value, std::make_unsigned<T>, std::common_type<T>>::type;

// clang-format on
// *INDENT-ON*
}
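
// Illustrative uses of the promotion traits above:
//   traits::promote_t<uint8_t>                        -> uint16_t
//   traits::make_signed_conditional_t<uint16_t>::type -> int16_t
//   traits::make_signed_conditional_t<float>::type    -> float (non-integral types are passed through)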

/** Look up the format corresponding to a channel.
 *
 * @param[in] channel Channel type.
 *
 * @return Format that contains the given channel.
 */
inline Format get_format_for_channel(Channel channel)
{
    switch(channel)
    {
        case Channel::R:
        case Channel::G:
        case Channel::B:
            return Format::RGB888;
        default:
            throw std::runtime_error("Unsupported channel");
    }
}

/** Return the format of a channel.
 *
 * @param[in] channel Channel type.
 *
 * @return Format of the given channel.
 */
inline Format get_channel_format(Channel channel)
{
    switch(channel)
    {
        case Channel::R:
        case Channel::G:
        case Channel::B:
            return Format::U8;
        default:
            throw std::runtime_error("Unsupported channel");
    }
}

/** Base case of foldl.
 *
 * @return value.
 */
template <typename F, typename T>
inline T foldl(F &&, const T &value)
{
    return value;
}

/** Base case of foldl.
 *
 * @return func(value1, value2).
 */
template <typename F, typename T, typename U>
inline auto foldl(F &&func, T &&value1, U &&value2) -> decltype(func(value1, value2))
{
    return func(value1, value2);
}

/** Fold left.
 *
 * @param[in] func    Binary function to be called.
 * @param[in] initial Initial value.
 * @param[in] value   Argument passed to the function.
 * @param[in] values  Remaining arguments.
 */
template <typename F, typename I, typename T, typename... Vs>
inline I foldl(F &&func, I &&initial, T &&value, Vs &&... values)
{
    return foldl(std::forward<F>(func), func(std::forward<I>(initial), std::forward<T>(value)), std::forward<Vs>(values)...);
}
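
// Illustrative fold (assumes a callable such as std::plus from <functional> is available at the call site):
//   foldl(std::plus<int>(), 0, 1, 2, 3) -> 6, evaluated as ((0 + 1) + 2) + 3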
202
SiCong Libacaf9a2017-06-19 13:41:45 +0100203/** Create a valid region based on tensor shape, border mode and border size
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100204 *
Diego Lopez Recasbcbc9702017-12-18 11:28:27 +0000205 * @param[in] a_shape Shape used as size of the valid region.
SiCong Libacaf9a2017-06-19 13:41:45 +0100206 * @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
207 * @param[in] border_size (Optional) Border size used to specify the region to exclude.
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100208 *
SiCong Libacaf9a2017-06-19 13:41:45 +0100209 * @return A valid region starting at (0, 0, ...) with size of @p shape if @p border_undefined is false; otherwise
210 * return A valid region starting at (@p border_size.left, @p border_size.top, ...) with reduced size of @p shape.
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100211 */
inline ValidRegion shape_to_valid_region(const TensorShape &a_shape, bool border_undefined = false, BorderSize border_size = BorderSize(0))
{
    ValidRegion valid_region{ Coordinates(), a_shape };

    Coordinates &anchor = valid_region.anchor;
    TensorShape &shape  = valid_region.shape;

    if(border_undefined)
    {
        ARM_COMPUTE_ERROR_ON(shape.num_dimensions() < 2);

        anchor.set(0, border_size.left);
        anchor.set(1, border_size.top);

        const int valid_shape_x = std::max(0, static_cast<int>(shape.x()) - static_cast<int>(border_size.left) - static_cast<int>(border_size.right));
        const int valid_shape_y = std::max(0, static_cast<int>(shape.y()) - static_cast<int>(border_size.top) - static_cast<int>(border_size.bottom));

        shape.set(0, valid_shape_x);
        shape.set(1, valid_shape_y);
    }

    return valid_region;
}
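
// Illustrative example: a 9x9 tensor with an undefined border of size 1 on every side
//   shape_to_valid_region(TensorShape(9U, 9U), true, BorderSize(1))
// gives a valid region anchored at (1, 1) with shape 7x7.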

/** Create a valid region for Gaussian Pyramid Half based on tensor shape and valid region at level "i - 1" and border mode
 *
 * @note The border size is 2 in case of Gaussian Pyramid Half
 *
 * @param[in] a_shape          Shape used at level "i - 1" of Gaussian Pyramid Half
 * @param[in] a_valid_region   Valid region used at level "i - 1" of Gaussian Pyramid Half
 * @param[in] border_undefined (Optional) Boolean indicating if the border mode is undefined.
 *
 * @return The valid region for the level "i" of Gaussian Pyramid Half
 */
inline ValidRegion shape_to_valid_region_gaussian_pyramid_half(const TensorShape &a_shape, const ValidRegion &a_valid_region, bool border_undefined = false)
{
    constexpr int border_size = 2;

    ValidRegion valid_region{ Coordinates(), a_shape };

    Coordinates &anchor = valid_region.anchor;
    TensorShape &shape  = valid_region.shape;

    // Compute tensor shape for level "i" of Gaussian Pyramid Half
    // dst_width  = (src_width + 1) * 0.5f
    // dst_height = (src_height + 1) * 0.5f
    shape.set(0, (shape[0] + 1) * 0.5f);
    shape.set(1, (shape[1] + 1) * 0.5f);

    if(border_undefined)
    {
        ARM_COMPUTE_ERROR_ON(shape.num_dimensions() < 2);

        // Compute the left and top invalid borders
        float invalid_border_left = static_cast<float>(a_valid_region.anchor.x() + border_size) / 2.0f;
        float invalid_border_top  = static_cast<float>(a_valid_region.anchor.y() + border_size) / 2.0f;

        // For the new anchor point we can have 2 cases:
        // 1) If the width/height of the tensor shape is odd, we have to take the ceil value of (a_valid_region.anchor.x() + border_size) / 2.0f or (a_valid_region.anchor.y() + border_size) / 2.0f
        // 2) If the width/height of the tensor shape is even, we have to take the floor value of (a_valid_region.anchor.x() + border_size) / 2.0f or (a_valid_region.anchor.y() + border_size) / 2.0f
        // In this manner we should be able to propagate correctly the valid region along all levels of the pyramid
        invalid_border_left = (shape[0] % 2) ? std::ceil(invalid_border_left) : std::floor(invalid_border_left);
        invalid_border_top  = (shape[1] % 2) ? std::ceil(invalid_border_top) : std::floor(invalid_border_top);

        // Set the anchor point
        anchor.set(0, static_cast<int>(invalid_border_left));
        anchor.set(1, static_cast<int>(invalid_border_top));

        // Compute shape
        // Calculate the right and bottom invalid borders at the previous level of the pyramid
        const float prev_invalid_border_right  = static_cast<float>(shape[0] - (a_valid_region.anchor.x() + a_valid_region.shape[0]));
        const float prev_invalid_border_bottom = static_cast<float>(shape[1] - (a_valid_region.anchor.y() + a_valid_region.shape[1]));

        // Calculate the right and bottom invalid borders at the current level of the pyramid
        const float invalid_border_right  = std::ceil((prev_invalid_border_right + static_cast<float>(border_size)) / 2.0f);
        const float invalid_border_bottom = std::ceil((prev_invalid_border_bottom + static_cast<float>(border_size)) / 2.0f);

        const int valid_shape_x = std::max(0, static_cast<int>(shape.x()) - static_cast<int>(invalid_border_left) - static_cast<int>(invalid_border_right));
        const int valid_shape_y = std::max(0, static_cast<int>(shape.y()) - static_cast<int>(invalid_border_top) - static_cast<int>(invalid_border_bottom));

        shape.set(0, valid_shape_x);
        shape.set(1, valid_shape_y);
    }

    return valid_region;
}

/** Write the value after casting the pointer according to @p data_type.
 *
 * @warning The type of the value must match the specified data type.
 *
 * @param[out] ptr       Pointer to memory where the @p value will be written.
 * @param[in]  value     Value that will be written.
 * @param[in]  data_type Data type that will be written.
 */
template <typename T>
void store_value_with_data_type(void *ptr, T value, DataType data_type)
{
    switch(data_type)
    {
        case DataType::U8:
        case DataType::QASYMM8:
            *reinterpret_cast<uint8_t *>(ptr) = value;
            break;
        case DataType::S8:
        case DataType::QS8:
            *reinterpret_cast<int8_t *>(ptr) = value;
            break;
        case DataType::U16:
            *reinterpret_cast<uint16_t *>(ptr) = value;
            break;
        case DataType::S16:
        case DataType::QS16:
            *reinterpret_cast<int16_t *>(ptr) = value;
            break;
        case DataType::U32:
            *reinterpret_cast<uint32_t *>(ptr) = value;
            break;
        case DataType::S32:
            *reinterpret_cast<int32_t *>(ptr) = value;
            break;
        case DataType::U64:
            *reinterpret_cast<uint64_t *>(ptr) = value;
            break;
        case DataType::S64:
            *reinterpret_cast<int64_t *>(ptr) = value;
            break;
        case DataType::F16:
            *reinterpret_cast<half *>(ptr) = value;
            break;
        case DataType::F32:
            *reinterpret_cast<float *>(ptr) = value;
            break;
        case DataType::F64:
            *reinterpret_cast<double *>(ptr) = value;
            break;
        case DataType::SIZET:
            *reinterpret_cast<size_t *>(ptr) = value;
            break;
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

/** Saturate a value of type T against the numeric limits of type U.
 *
 * @param[in] val Value to be saturated.
 *
 * @return saturated value.
 */
template <typename U, typename T>
T saturate_cast(T val)
{
    if(val > static_cast<T>(std::numeric_limits<U>::max()))
    {
        val = static_cast<T>(std::numeric_limits<U>::max());
    }
    if(val < static_cast<T>(std::numeric_limits<U>::lowest()))
    {
        val = static_cast<T>(std::numeric_limits<U>::lowest());
    }
    return val;
}
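
// Illustrative examples (the clamped result keeps the source type T):
//   saturate_cast<uint8_t>(300) -> 255
//   saturate_cast<int8_t>(-200) -> -128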

/** Find the signed promoted common type.
 */
template <typename... T>
struct common_promoted_signed_type
{
    using common_type       = typename std::common_type<T...>::type;
    using promoted_type     = traits::promote_t<common_type>;
    using intermediate_type = typename traits::make_signed_conditional_t<promoted_type>::type;
};

/** Find the unsigned promoted common type.
 */
template <typename... T>
struct common_promoted_unsigned_type
{
    using common_type       = typename std::common_type<T...>::type;
    using promoted_type     = traits::promote_t<common_type>;
    using intermediate_type = typename traits::make_unsigned_conditional_t<promoted_type>::type;
};

/** Convert a linear index into n-dimensional coordinates.
 *
 * @param[in] shape Shape of the n-dimensional tensor.
 * @param[in] index Linear index specifying the i-th element.
 *
 * @return n-dimensional coordinates.
 */
inline Coordinates index2coord(const TensorShape &shape, int index)
{
    int num_elements = shape.total_size();

    ARM_COMPUTE_ERROR_ON_MSG(index < 0 || index >= num_elements, "Index has to be in [0, num_elements)");
    ARM_COMPUTE_ERROR_ON_MSG(num_elements == 0, "Cannot create coordinate from empty shape");

    Coordinates coord{ 0 };

    for(int d = shape.num_dimensions() - 1; d >= 0; --d)
    {
        num_elements /= shape[d];
        coord.set(d, index / num_elements);
        index %= num_elements;
    }

    return coord;
}

/** Linearise the given coordinate.
 *
 * Transforms the given coordinate into a linear offset in terms of
 * elements.
 *
 * @param[in] shape Shape of the n-dimensional tensor.
 * @param[in] coord Coordinate to be converted.
 *
 * @return Linear offset to the element.
 */
inline int coord2index(const TensorShape &shape, const Coordinates &coord)
{
    ARM_COMPUTE_ERROR_ON_MSG(shape.total_size() == 0, "Cannot get index from empty shape");
    ARM_COMPUTE_ERROR_ON_MSG(coord.num_dimensions() == 0, "Cannot get index of empty coordinate");

    int index    = 0;
    int dim_size = 1;

    for(unsigned int i = 0; i < coord.num_dimensions(); ++i)
    {
        index += coord[i] * dim_size;
        dim_size *= shape[i];
    }

    return index;
}
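
// Illustrative round trip for a 4x2 tensor, where x is the innermost dimension:
//   index2coord(TensorShape(4U, 2U), 5)                  -> Coordinates(1, 1)
//   coord2index(TensorShape(4U, 2U), Coordinates(1, 1))  -> 5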

/** Check if a coordinate is within a valid region */
inline bool is_in_valid_region(const ValidRegion &valid_region, Coordinates coord)
{
    for(size_t d = 0; d < Coordinates::num_max_dimensions; ++d)
    {
        if(coord[d] < valid_region.start(d) || coord[d] >= valid_region.end(d))
        {
            return false;
        }
    }

    return true;
}

/** Create and initialize a tensor of the given type.
 *
 * @param[in] shape                Tensor shape.
 * @param[in] data_type            Data type.
 * @param[in] num_channels         (Optional) Number of channels.
 * @param[in] fixed_point_position (Optional) Number of fractional bits.
 * @param[in] quantization_info    (Optional) Quantization info for asymmetric quantized types.
 * @param[in] data_layout          (Optional) Data layout. Default is NCHW.
 *
 * @return Initialized tensor of given type.
 */
template <typename T>
inline T create_tensor(const TensorShape &shape, DataType data_type, int num_channels = 1,
                       int fixed_point_position = 0, QuantizationInfo quantization_info = QuantizationInfo(), DataLayout data_layout = DataLayout::NCHW)
{
    T          tensor;
    TensorInfo info(shape, num_channels, data_type, fixed_point_position);
    info.set_quantization_info(quantization_info);
    info.set_data_layout(data_layout);
    tensor.allocator()->init(info);

    return tensor;
}
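
// Illustrative usage from a test body (assumes the CPU runtime tensor type, arm_compute::Tensor,
// which test code includes separately):
//   Tensor src = create_tensor<Tensor>(TensorShape(27U, 11U), DataType::F32);
//   src.allocator()->allocate();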

/** Create and initialize a tensor of the given type.
 *
 * @param[in] shape  Tensor shape.
 * @param[in] format Format type.
 *
 * @return Initialized tensor of given type.
 */
template <typename T>
inline T create_tensor(const TensorShape &shape, Format format)
{
    TensorInfo info(shape, format);

    T tensor;
    tensor.allocator()->init(info);

    return tensor;
}

/** Create and initialize a multi-image of the given type.
 *
 * @param[in] shape  Tensor shape.
 * @param[in] format Format type.
 *
 * @return Initialized multi-image of given type.
 */
template <typename T>
inline T create_multi_image(const TensorShape &shape, Format format)
{
    T multi_image;
    multi_image.init(shape.x(), shape.y(), format);

    return multi_image;
}

/** Create and initialize a HOG (Histogram of Oriented Gradients) of the given type.
 *
 * @param[in] cell_size             Cell size in pixels
 * @param[in] block_size            Block size in pixels. Must be a multiple of cell_size.
 * @param[in] detection_window_size Detection window size in pixels. Must be a multiple of block_size and block_stride.
 * @param[in] block_stride          Distance in pixels between 2 consecutive blocks along the x and y direction. Must be a multiple of cell size
 * @param[in] num_bins              Number of histogram bins for each cell
 * @param[in] normalization_type    (Optional) Normalization type to use for each block
 * @param[in] l2_hyst_threshold     (Optional) Threshold used for L2HYS_NORM normalization method
 * @param[in] phase_type            (Optional) Type of @ref PhaseType
 *
 * @return Initialized HOG of given type.
 */
template <typename T>
inline T create_HOG(const Size2D &cell_size, const Size2D &block_size, const Size2D &detection_window_size, const Size2D &block_stride, size_t num_bins,
                    HOGNormType normalization_type = HOGNormType::L2HYS_NORM, float l2_hyst_threshold = 0.2f, PhaseType phase_type = PhaseType::UNSIGNED)
{
    T       hog;
    HOGInfo hog_info(cell_size, block_size, detection_window_size, block_stride, num_bins, normalization_type, l2_hyst_threshold, phase_type);
    hog.init(hog_info);

    return hog;
}
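
// Illustrative usage with the classic Dalal-Triggs HOG parameters (8x8 cells, 16x16 blocks,
// 64x128 detection window, 8x8 block stride, 9 bins); the concrete HOG type depends on the backend under test:
//   auto hog = create_HOG<HOG>(Size2D(8U, 8U), Size2D(16U, 16U), Size2D(64U, 128U), Size2D(8U, 8U), 9U);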

/** Create a vector of random ROIs.
 *
 * @param[in] shape     The shape of the input tensor.
 * @param[in] pool_info The ROI pooling information.
 * @param[in] num_rois  The number of ROIs to be created.
 * @param[in] seed      The random seed to be used.
 *
 * @return A vector that contains the requested number of random ROIs
 */
inline std::vector<ROI> generate_random_rois(const TensorShape &shape, const ROIPoolingLayerInfo &pool_info, unsigned int num_rois, std::random_device::result_type seed)
{
    ARM_COMPUTE_ERROR_ON((pool_info.pooled_width() < 4) || (pool_info.pooled_height() < 4));

    std::vector<ROI> rois;
    std::mt19937     gen(seed);
    const int        pool_width  = pool_info.pooled_width();
    const int        pool_height = pool_info.pooled_height();
    const float      roi_scale   = pool_info.spatial_scale();

    // Calculate distribution bounds
    const auto scaled_width  = static_cast<int>((shape.x() / roi_scale) / pool_width);
    const auto scaled_height = static_cast<int>((shape.y() / roi_scale) / pool_height);
    const auto min_width     = static_cast<int>(pool_width / roi_scale);
    const auto min_height    = static_cast<int>(pool_height / roi_scale);

    // Create distributions
    std::uniform_int_distribution<int> dist_batch(0, shape[3] - 1);
    std::uniform_int_distribution<int> dist_x(0, scaled_width);
    std::uniform_int_distribution<int> dist_y(0, scaled_height);
    std::uniform_int_distribution<int> dist_w(min_width, std::max(min_width, (pool_width - 2) * scaled_width));
    std::uniform_int_distribution<int> dist_h(min_height, std::max(min_height, (pool_height - 2) * scaled_height));

    for(unsigned int r = 0; r < num_rois; ++r)
    {
        ROI roi;
        roi.batch_idx   = dist_batch(gen);
        roi.rect.x      = dist_x(gen);
        roi.rect.y      = dist_y(gen);
        roi.rect.width  = dist_w(gen);
        roi.rect.height = dist_h(gen);
        rois.push_back(roi);
    }

    return rois;
}

/** Fill an array with the given vector of values.
 *
 * @param[out] array Array accessor to be filled.
 * @param[in]  v     Values to copy into the array.
 */
template <typename T, typename ArrayAccessor_T>
inline void fill_array(ArrayAccessor_T &&array, const std::vector<T> &v)
{
    array.resize(v.size());
    std::memcpy(array.buffer(), v.data(), v.size() * sizeof(T));
}

/** Obtain numpy type string from DataType.
 *
 * @param[in] data_type Data type.
 *
 * @return numpy type string.
 */
inline std::string get_typestring(DataType data_type)
{
    // Check endianness
    const unsigned int i = 1;
    const char        *c = reinterpret_cast<const char *>(&i);
    std::string endianness;
    if(*c == 1)
    {
        endianness = std::string("<");
    }
    else
    {
        endianness = std::string(">");
    }
    const std::string no_endianness("|");

    switch(data_type)
    {
        case DataType::U8:
            return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
        case DataType::S8:
            return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
        case DataType::U16:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
        case DataType::S16:
            return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
        case DataType::U32:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
        case DataType::S32:
            return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
        case DataType::U64:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
        case DataType::S64:
            return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
        case DataType::F32:
            return endianness + "f" + support::cpp11::to_string(sizeof(float));
        case DataType::F64:
            return endianness + "f" + support::cpp11::to_string(sizeof(double));
        case DataType::SIZET:
            return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}
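
// Illustrative results on a typical little-endian 64-bit target:
//   get_typestring(DataType::U8)  -> "|u1"
//   get_typestring(DataType::F32) -> "<f4"
//   get_typestring(DataType::S64) -> "<i8"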

/** Sync if necessary.
 */
template <typename TensorType>
inline void sync_if_necessary()
{
#ifdef ARM_COMPUTE_CL
    if(opencl_is_available() && std::is_same<typename std::decay<TensorType>::type, arm_compute::CLTensor>::value)
    {
        CLScheduler::get().sync();
    }
#endif /* ARM_COMPUTE_CL */
}

/** Sync tensor if necessary.
 *
 * @note If the destination tensor is not used on OpenGL ES, the GPU will optimize out the operation.
 *
 * @param[in] tensor Tensor to be synced.
 */
template <typename TensorType>
inline void sync_tensor_if_necessary(TensorType &tensor)
{
#ifdef ARM_COMPUTE_GC
    if(opengles31_is_available() && std::is_same<typename std::decay<TensorType>::type, arm_compute::GCTensor>::value)
    {
        // Force sync the tensor by calling map and unmap.
        IGCTensor &t = dynamic_cast<IGCTensor &>(tensor);
        t.map();
        t.unmap();
    }
#endif /* ARM_COMPUTE_GC */
}
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_UTILS_H__ */