blob: 6c079b687263cce3f505e6aa84a2f63cbd1a2f8b [file] [log] [blame]
Anthony Barbier6ff3b192017-09-04 18:44:23 +01001/*
2 * Copyright (c) 2017 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__
25#define __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__
26
27#include "RawTensor.h"
28#include "TensorCache.h"
29#include "Utils.h"
30
31#include "arm_compute/core/Coordinates.h"
32#include "arm_compute/core/Error.h"
33#include "arm_compute/core/Helpers.h"
34#include "arm_compute/core/TensorInfo.h"
35#include "arm_compute/core/TensorShape.h"
36#include "arm_compute/core/Types.h"
37#include "arm_compute/core/Window.h"
38
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <limits>
#include <mutex>
#include <random>
#include <stdexcept>
#include <string>
#include <type_traits>
45
Pablo Tello383deec2017-06-23 10:40:05 +010046#if ARM_COMPUTE_ENABLE_FP16
47#include <arm_fp16.h> // needed for float16_t
Anthony Barbierac69aa12017-07-03 17:39:37 +010048#endif /* ARM_COMPUTE_ENABLE_FP16 */
Pablo Tello383deec2017-06-23 10:40:05 +010049
Anthony Barbier6ff3b192017-09-04 18:44:23 +010050namespace arm_compute
51{
52namespace test
53{
/** Factory class to create and fill tensors.
 *
 * Allows to initialise tensors from loaded images or by specifying the shape
 * explicitly. Furthermore, provides methods to fill tensors with the content of
 * loaded images or with random values.
 */
class TensorLibrary final
{
public:
    /** Initialises the library with a @p path to the image directory.
     *
     * @param[in] path Path to load images from.
     */
    TensorLibrary(std::string path);

    /** Initialises the library with a @p path to the image directory.
     * Furthermore, sets the seed for the random generator to @p seed.
     *
     * @param[in] path Path to load images from.
     * @param[in] seed Seed used to initialise the random number generator.
     */
    TensorLibrary(std::string path, std::random_device::result_type seed);

    /** Seed that is used to fill tensors with random values. */
    std::random_device::result_type seed() const;

    /** Provides a tensor shape for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     */
    TensorShape get_image_shape(const std::string &name);

    /** Creates an uninitialised raw tensor with the given @p shape, @p
     * data_type and @p num_channels.
     *
     * @param[in] shape                Shape used to initialise the tensor.
     * @param[in] data_type            Data type used to initialise the tensor.
     * @param[in] num_channels         (Optional) Number of channels used to initialise the tensor.
     * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers.
     */
    static RawTensor get(const TensorShape &shape, DataType data_type, int num_channels = 1, int fixed_point_position = 0);

    /** Creates an uninitialised raw tensor with the given @p shape and @p format.
     *
     * @param[in] shape  Shape used to initialise the tensor.
     * @param[in] format Format used to initialise the tensor.
     */
    static RawTensor get(const TensorShape &shape, Format format);

    /** Provides a constant raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     */
    const RawTensor &get(const std::string &name) const;

    /** Provides a raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     */
    RawTensor get(const std::string &name);

    /** Creates an uninitialised raw tensor with the given @p data_type and @p
     * num_channels. The shape is derived from the specified image.
     *
     * @param[in] name         Image file used to initialise the tensor.
     * @param[in] data_type    Data type used to initialise the tensor.
     * @param[in] num_channels Number of channels used to initialise the tensor.
     */
    RawTensor get(const std::string &name, DataType data_type, int num_channels = 1) const;

    /** Provides a constant raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     */
    const RawTensor &get(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     */
    RawTensor get(const std::string &name, Format format);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     */
    const RawTensor &get(const std::string &name, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     */
    RawTensor get(const std::string &name, Channel channel);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image formatted to @p format.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     */
    const RawTensor &get(const std::string &name, Format format, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image formatted to @p format.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     */
    RawTensor get(const std::string &name, Format format, Channel channel);

    /** Fills the specified @p tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] tensor       To be filled tensor.
     * @param[in]      distribution Distribution used to fill the tensor.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     *       will be used to draw samples.
     */
    template <typename T, typename D>
    void fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p raw tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] raw          To be filled raw.
     * @param[in]      distribution Distribution used to fill the tensor.
     * @param[in]      seed_offset  The offset will be added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator(Generator &) which
     *       will be used to draw samples.
     */
    template <typename D>
    void fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      name   Image file used to fill the tensor.
     * @param[in]      format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format) const;

    /** Fills the raw tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] raw    To be filled raw tensor.
     * @param[in]      name   Image file used to fill the tensor.
     * @param[in]      format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] tensor  To be filled tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] raw     To be filled raw tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     *       inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Channel channel) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] tensor  To be filled tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      format  Format of the image used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] raw     To be filled raw tensor.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      format  Format of the image used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     *          matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format, Channel channel) const;

    /** Fill a tensor with uniform distribution across the range of its type.
     *
     * @param[in, out] tensor      To be filled tensor.
     * @param[in]      seed_offset The offset will be added to the global seed before initialising the random generator.
     */
    template <typename T>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const;

    /** Fill a tensor with uniform distribution across a specified range.
     *
     * @param[in, out] tensor      To be filled tensor.
     * @param[in]      seed_offset The offset will be added to the global seed before initialising the random generator.
     * @param[in]      low         Lowest value in the range (inclusive).
     * @param[in]      high        Highest value in the range (inclusive).
     *
     * @note @p low and @p high must be of the same type as the data type of @p tensor.
     */
    template <typename T, typename D>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const;

    /** Fills the specified @p tensor with data loaded from binary in specified path.
     *
     * @param[in, out] tensor To be filled tensor.
     * @param[in]      name   Data file.
     */
    template <typename T>
    void fill_layer_data(T &&tensor, std::string name) const;

private:
    // Function prototype to convert between image formats.
    using Converter = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to extract a channel from an image.
    using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to load an image file.
    using Loader = RawTensor (*)(const std::string &path);

    const Converter &get_converter(Format src, Format dst) const;
    const Converter &get_converter(DataType src, Format dst) const;
    const Converter &get_converter(Format src, DataType dst) const;
    const Converter &get_converter(DataType src, DataType dst) const;
    const Extractor &get_extractor(Format format, Channel) const;
    const Loader &get_loader(const std::string &extension) const;

    /** Creates a raw tensor from the specified image.
     *
     * @param[in] name To be loaded image file.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     *       is loaded instead.
     */
    RawTensor load_image(const std::string &name) const;

    /** Provides a raw tensor for the specified image and format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * If the tensor has already been requested before the cached version will
     * be returned. Otherwise the tensor will be added to the cache.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     *       is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image, format and channel.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * If the tensor has already been requested before the cached version will
     * be returned. Otherwise the tensor will be added to the cache.
     *
     * @note If use_single_image is true @p name is ignored and the user image
     *       is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;

    // Cache of already loaded/converted raw tensors, shared across lookups.
    mutable TensorCache _cache{};
    // Guards cache insertion for (name, format) lookups.
    mutable std::mutex _format_lock{};
    // Guards cache insertion for (name, format, channel) lookups.
    mutable std::mutex _channel_lock{};
    // Root directory from which images and binary data files are loaded.
    const std::string _library_path;
    // Global seed; combined with per-call seed offsets to make fills reproducible.
    std::random_device::result_type _seed;
};
376
377template <typename T, typename D>
378void TensorLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
379{
380 Window window;
381 for(unsigned int d = 0; d < tensor.shape().num_dimensions(); ++d)
382 {
383 window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
384 }
385
386 std::mt19937 gen(_seed + seed_offset);
387
388 //FIXME: Replace with normal loop
389 execute_window_loop(window, [&](const Coordinates & id)
390 {
391 using ResultType = typename std::remove_reference<D>::type::result_type;
392 const ResultType value = distribution(gen);
393 void *const out_ptr = tensor(id);
394 store_value_with_data_type(out_ptr, value, tensor.data_type());
395 });
396}
397
398template <typename D>
399void TensorLibrary::fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const
400{
401 std::mt19937 gen(_seed + seed_offset);
402
403 for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
404 {
405 using ResultType = typename std::remove_reference<D>::type::result_type;
406 const ResultType value = distribution(gen);
407 store_value_with_data_type(raw.data() + offset, value, raw.data_type());
408 }
409}
410
411template <typename T>
412void TensorLibrary::fill(T &&tensor, const std::string &name, Format format) const
413{
414 const RawTensor &raw = get(name, format);
415
416 for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
417 {
418 const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());
419
420 const RawTensor::BufferType *const raw_ptr = raw.data() + offset;
421 const auto out_ptr = static_cast<RawTensor::BufferType *>(tensor(id));
422 std::copy_n(raw_ptr, raw.element_size(), out_ptr);
423 }
424}
425
426template <typename T>
427void TensorLibrary::fill(T &&tensor, const std::string &name, Channel channel) const
428{
429 fill(std::forward<T>(tensor), name, get_format_for_channel(channel), channel);
430}
431
432template <typename T>
433void TensorLibrary::fill(T &&tensor, const std::string &name, Format format, Channel channel) const
434{
435 const RawTensor &raw = get(name, format, channel);
436
437 for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
438 {
439 const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());
440
441 const RawTensor::BufferType *const raw_ptr = raw.data() + offset;
442 const auto out_ptr = static_cast<RawTensor::BufferType *>(tensor(id));
443 std::copy_n(raw_ptr, raw.element_size(), out_ptr);
444 }
445}
446
/** Fills @p tensor with values drawn uniformly across the full range of its data type.
 *
 * @param[in, out] tensor      To be filled tensor.
 * @param[in]      seed_offset The offset will be added to the global seed before initialising the random generator.
 *
 * Integer types sample the whole [lowest, max] interval of the type.
 * Floating point types are clamped to [-1000, 1000] since a uniform draw
 * over [-inf, inf] is meaningless.
 */
template <typename T>
void TensorLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        {
            std::uniform_int_distribution<uint8_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max());
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QS8:
        {
            std::uniform_int_distribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(std::numeric_limits<uint16_t>::lowest(), std::numeric_limits<uint16_t>::max());
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(), std::numeric_limits<int16_t>::max());
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(std::numeric_limits<uint32_t>::lowest(), std::numeric_limits<uint32_t>::max());
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(std::numeric_limits<uint64_t>::lowest(), std::numeric_limits<uint64_t>::max());
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max());
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
#if ARM_COMPUTE_ENABLE_FP16
        case DataType::F16:
        {
            // Samples are drawn as float and narrowed to F16 when stored.
            std::uniform_real_distribution<float> distribution_f16(-1000.f, 1000.f);
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
#endif /* ARM_COMPUTE_ENABLE_FP16 */
        case DataType::F32:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<float> distribution_f32(-1000.f, 1000.f);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<double> distribution_f64(-1000.f, 1000.f);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            std::uniform_int_distribution<size_t> distribution_sizet(std::numeric_limits<size_t>::lowest(), std::numeric_limits<size_t>::max());
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}
533
534template <typename T, typename D>
535void TensorLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const
536{
537 switch(tensor.data_type())
538 {
539 case DataType::U8:
540 {
541 ARM_COMPUTE_ERROR_ON(!(std::is_same<uint8_t, D>::value));
542 std::uniform_int_distribution<uint8_t> distribution_u8(low, high);
543 fill(tensor, distribution_u8, seed_offset);
544 break;
545 }
546 case DataType::S8:
547 case DataType::QS8:
548 {
549 ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
550 std::uniform_int_distribution<int8_t> distribution_s8(low, high);
551 fill(tensor, distribution_s8, seed_offset);
552 break;
553 }
554 case DataType::U16:
555 {
556 ARM_COMPUTE_ERROR_ON(!(std::is_same<uint16_t, D>::value));
557 std::uniform_int_distribution<uint16_t> distribution_u16(low, high);
558 fill(tensor, distribution_u16, seed_offset);
559 break;
560 }
561 case DataType::S16:
562 {
563 ARM_COMPUTE_ERROR_ON(!(std::is_same<int16_t, D>::value));
564 std::uniform_int_distribution<int16_t> distribution_s16(low, high);
565 fill(tensor, distribution_s16, seed_offset);
566 break;
567 }
568 case DataType::U32:
569 {
570 ARM_COMPUTE_ERROR_ON(!(std::is_same<uint32_t, D>::value));
571 std::uniform_int_distribution<uint32_t> distribution_u32(low, high);
572 fill(tensor, distribution_u32, seed_offset);
573 break;
574 }
575 case DataType::S32:
576 {
577 ARM_COMPUTE_ERROR_ON(!(std::is_same<int32_t, D>::value));
578 std::uniform_int_distribution<int32_t> distribution_s32(low, high);
579 fill(tensor, distribution_s32, seed_offset);
580 break;
581 }
582 case DataType::U64:
583 {
584 ARM_COMPUTE_ERROR_ON(!(std::is_same<uint64_t, D>::value));
585 std::uniform_int_distribution<uint64_t> distribution_u64(low, high);
586 fill(tensor, distribution_u64, seed_offset);
587 break;
588 }
589 case DataType::S64:
590 {
591 ARM_COMPUTE_ERROR_ON(!(std::is_same<int64_t, D>::value));
592 std::uniform_int_distribution<int64_t> distribution_s64(low, high);
593 fill(tensor, distribution_s64, seed_offset);
594 break;
595 }
Pablo Tello383deec2017-06-23 10:40:05 +0100596#if ARM_COMPUTE_ENABLE_FP16
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100597 case DataType::F16:
598 {
Pablo Tello383deec2017-06-23 10:40:05 +0100599 std::uniform_real_distribution<float_t> distribution_f16(low, high);
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100600 fill(tensor, distribution_f16, seed_offset);
601 break;
602 }
Anthony Barbierac69aa12017-07-03 17:39:37 +0100603#endif /* ARM_COMPUTE_ENABLE_FP16 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100604 case DataType::F32:
605 {
606 ARM_COMPUTE_ERROR_ON(!(std::is_same<float, D>::value));
607 std::uniform_real_distribution<float> distribution_f32(low, high);
608 fill(tensor, distribution_f32, seed_offset);
609 break;
610 }
611 case DataType::F64:
612 {
613 ARM_COMPUTE_ERROR_ON(!(std::is_same<double, D>::value));
614 std::uniform_real_distribution<double> distribution_f64(low, high);
615 fill(tensor, distribution_f64, seed_offset);
616 break;
617 }
618 case DataType::SIZET:
619 {
620 ARM_COMPUTE_ERROR_ON(!(std::is_same<size_t, D>::value));
621 std::uniform_int_distribution<size_t> distribution_sizet(low, high);
622 fill(tensor, distribution_sizet, seed_offset);
623 break;
624 }
625 default:
626 ARM_COMPUTE_ERROR("NOT SUPPORTED!");
627 }
628}
629
630template <typename T>
631void TensorLibrary::fill_layer_data(T &&tensor, std::string name) const
632{
633#ifdef _WIN32
634 const std::string path_separator("\\");
Anthony Barbierac69aa12017-07-03 17:39:37 +0100635#else /* _WIN32 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100636 const std::string path_separator("/");
Anthony Barbierac69aa12017-07-03 17:39:37 +0100637#endif /* _WIN32 */
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100638
639 const std::string path = _library_path + path_separator + name;
640
641 // Open file
642 std::ifstream file(path, std::ios::in | std::ios::binary);
643 if(!file.good())
644 {
645 throw std::runtime_error("Could not load binary data: " + path);
646 }
647
648 Window window;
649 for(unsigned int d = 0; d < tensor.shape().num_dimensions(); ++d)
650 {
651 window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
652 }
653
654 //FIXME : Replace with normal loop
655 execute_window_loop(window, [&](const Coordinates & id)
656 {
657 float val;
658 file.read(reinterpret_cast<char *>(&val), sizeof(float));
659 void *const out_ptr = tensor(id);
660 store_value_with_data_type(out_ptr, val, tensor.data_type());
661 });
662}
663} // namespace test
664} // namespace arm_compute
Anthony Barbierac69aa12017-07-03 17:39:37 +0100665#endif /* __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__ */