/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__
#define __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__

#include "RawTensor.h"
#include "TensorCache.h"
#include "Utils.h"

#include "arm_compute/core/Coordinates.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Window.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <fstream>
#include <limits>
#include <mutex>
#include <random>
#include <stdexcept>
#include <string>
#include <type_traits>
namespace arm_compute
{
namespace test
{
/** Factory class to create and fill tensors.
 *
 * Tensors can be initialised from loaded images or by specifying their shape
 * explicitly. Furthermore, the class provides methods to fill tensors with the
 * content of loaded images or with random values.
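 *
 * A minimal usage sketch (illustrative only; the directory, seed and image
 * file name below are placeholders chosen for this example):
 * @code
 * TensorLibrary library("./test_images", 42);
 *
 * // Look up the cached grayscale version of an image.
 * const RawTensor &u8_image = library.get("image.ppm", Format::U8);
 * @endcode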
 */
class TensorLibrary final
{
public:
    /** Initialises the library with a @p path to the image directory.
     *
     * @param[in] path Path to load images from.
     */
    TensorLibrary(std::string path);

    /** Initialises the library with a @p path to the image directory.
     * Furthermore, sets the seed for the random generator to @p seed.
     *
     * @param[in] path Path to load images from.
     * @param[in] seed Seed used to initialise the random number generator.
     */
    TensorLibrary(std::string path, std::random_device::result_type seed);

    /** Seed that is used to fill tensors with random values. */
    std::random_device::result_type seed() const;

    /** Creates an uninitialised raw tensor with the given @p shape, @p
     * data_type and @p num_channels.
     *
     * @param[in] shape                Shape used to initialise the tensor.
     * @param[in] data_type            Data type used to initialise the tensor.
     * @param[in] num_channels         (Optional) Number of channels used to initialise the tensor.
     * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers.
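     *
     * Example (an illustrative sketch; the shape and data type are arbitrary):
     * @code
     * // 2D, single-channel F32 tensor; its contents are left uninitialised.
     * RawTensor raw = TensorLibrary::get(TensorShape(27U, 35U), DataType::F32);
     * @endcode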
     */
    static RawTensor get(const TensorShape &shape, DataType data_type, int num_channels = 1, int fixed_point_position = 0);

    /** Creates an uninitialised raw tensor with the given @p shape and @p format.
     *
     * @param[in] shape  Shape used to initialise the tensor.
     * @param[in] format Format used to initialise the tensor.
     */
    static RawTensor get(const TensorShape &shape, Format format);

    /** Provides a constant raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     */
    const RawTensor &get(const std::string &name) const;

    /** Provides a raw tensor for the specified image.
     *
     * @param[in] name Image file used to look up the raw tensor.
     */
    RawTensor get(const std::string &name);

    /** Creates an uninitialised raw tensor with the given @p data_type and @p
     * num_channels. The shape is derived from the specified image.
     *
     * @param[in] name         Image file used to initialise the tensor.
     * @param[in] data_type    Data type used to initialise the tensor.
     * @param[in] num_channels Number of channels used to initialise the tensor.
     */
    RawTensor get(const std::string &name, DataType data_type, int num_channels = 1) const;

    /** Provides a constant raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     */
    const RawTensor &get(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image after it has been
     * converted to @p format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     */
    RawTensor get(const std::string &name, Format format);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     */
    const RawTensor &get(const std::string &name, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     */
    RawTensor get(const std::string &name, Channel channel);

    /** Provides a constant raw tensor for the specified channel after it has
     * been extracted from the given image formatted to @p format.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     */
    const RawTensor &get(const std::string &name, Format format, Channel channel) const;

    /** Provides a raw tensor for the specified channel after it has been
     * extracted from the given image formatted to @p format.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     */
    RawTensor get(const std::string &name, Format format, Channel channel);

    /** Fills the specified @p tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] tensor       Tensor to be filled.
     * @param[in]      distribution Distribution used to fill the tensor.
     * @param[in]      seed_offset  Offset added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator()(Generator &) which
     * will be used to draw samples.
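     *
     * Example (a sketch; library is a TensorLibrary instance and raw is an
     * F32 tensor or accessor assumed for this example):
     * @code
     * std::normal_distribution<float> distribution(0.f, 0.5f);
     * library.fill(raw, distribution, 3);
     * @endcode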
     */
    template <typename T, typename D>
    void fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p raw tensor with random values drawn from @p
     * distribution.
     *
     * @param[in, out] raw          Raw tensor to be filled.
     * @param[in]      distribution Distribution used to fill the tensor.
     * @param[in]      seed_offset  Offset added to the global seed before initialising the random generator.
     *
     * @note The @p distribution has to provide operator()(Generator &) which
     * will be used to draw samples.
     */
    template <typename D>
    void fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const;

    /** Fills the specified @p tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] tensor Tensor to be filled.
     * @param[in]      name   Image file used to fill the tensor.
     * @param[in]      format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
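     *
     * Example (a sketch; the file name is a placeholder and the destination is
     * assumed to have the shape of the image):
     * @code
     * library.fill(rgb_tensor, "image.ppm", Format::RGB888);
     * @endcode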
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format) const;

    /** Fills the raw tensor with the content of the specified image
     * converted to the given format.
     *
     * @param[in, out] raw    Raw tensor to be filled.
     * @param[in]      name   Image file used to fill the tensor.
     * @param[in]      format Format of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] tensor  Tensor to be filled.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
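     *
     * Example (a sketch; the format is inferred from the channel via
     * get_format_for_channel and the file name is a placeholder):
     * @code
     * library.fill(red_tensor, "image.ppm", Channel::R);
     * @endcode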
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image.
     *
     * @param[in, out] raw     Raw tensor to be filled.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @note The channel has to be unambiguous so that the format can be
     * inferred automatically.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Channel channel) const;

    /** Fills the specified @p tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] tensor  Tensor to be filled.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      format  Format of the image used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    template <typename T>
    void fill(T &&tensor, const std::string &name, Format format, Channel channel) const;

    /** Fills the raw tensor with the content of the specified channel
     * extracted from the given image after it has been converted to the given
     * format.
     *
     * @param[in, out] raw     Raw tensor to be filled.
     * @param[in]      name    Image file used to fill the tensor.
     * @param[in]      format  Format of the image used to fill the tensor.
     * @param[in]      channel Channel of the image used to fill the tensor.
     *
     * @warning No check is performed that the specified format actually
     * matches the format of the tensor.
     */
    void fill(RawTensor &raw, const std::string &name, Format format, Channel channel) const;

    /** Fills a tensor with a uniform distribution across the range of its type.
     *
     * @param[in, out] tensor      Tensor to be filled.
     * @param[in]      seed_offset Offset added to the global seed before initialising the random generator.
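     *
     * Example (a sketch; for a U8 tensor this draws values from [0, 255]):
     * @code
     * library.fill_tensor_uniform(raw_u8, 0);
     * @endcode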
     */
    template <typename T>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const;

    /** Fills a tensor with a uniform distribution across a specified range.
     *
     * @param[in, out] tensor      Tensor to be filled.
     * @param[in]      seed_offset Offset added to the global seed before initialising the random generator.
     * @param[in]      low         Lowest value in the range (inclusive).
     * @param[in]      high        Highest value in the range (inclusive).
     *
     * @note @p low and @p high must be of the same type as the data type of @p tensor.
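     *
     * Example (a sketch; the tensor is assumed to hold U8 data, so the bounds
     * are given as uint8_t):
     * @code
     * library.fill_tensor_uniform(raw_u8, 0, uint8_t{ 16 }, uint8_t{ 240 });
     * @endcode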
     */
    template <typename T, typename D>
    void fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const;

    /** Fills the specified @p tensor with data loaded from a binary file inside the library path.
     *
     * @param[in, out] tensor Tensor to be filled.
     * @param[in]      name   Name of the data file.
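     *
     * Example (a sketch; the file name is a placeholder and the file is
     * expected to contain raw 32-bit floats, one per tensor element):
     * @code
     * library.fill_layer_data(weights, "fc1_weight.dat");
     * @endcode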
     */
    template <typename T>
    void fill_layer_data(T &&tensor, std::string name) const;

private:
    // Function prototype to convert between image formats.
    using Converter = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to extract a channel from an image.
    using Extractor = void (*)(const RawTensor &src, RawTensor &dst);
    // Function prototype to load an image file.
    using Loader = RawTensor (*)(const std::string &path);

    const Converter &get_converter(Format src, Format dst) const;
    const Converter &get_converter(DataType src, Format dst) const;
    const Converter &get_converter(Format src, DataType dst) const;
    const Converter &get_converter(DataType src, DataType dst) const;
    const Extractor &get_extractor(Format format, Channel channel) const;
    const Loader &get_loader(const std::string &extension) const;

    /** Creates a raw tensor from the specified image.
     *
     * @param[in] name Image file to be loaded.
     *
     * @note If use_single_image is true, @p name is ignored and the user image
     * is loaded instead.
     */
    RawTensor load_image(const std::string &name) const;

    /** Provides a raw tensor for the specified image and format.
     *
     * @param[in] name   Image file used to look up the raw tensor.
     * @param[in] format Format used to look up the raw tensor.
     *
     * If the tensor has already been requested, the cached version is
     * returned. Otherwise the tensor is added to the cache.
     *
     * @note If use_single_image is true, @p name is ignored and the user image
     * is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format) const;

    /** Provides a raw tensor for the specified image, format and channel.
     *
     * @param[in] name    Image file used to look up the raw tensor.
     * @param[in] format  Format used to look up the raw tensor.
     * @param[in] channel Channel used to look up the raw tensor.
     *
     * If the tensor has already been requested, the cached version is
     * returned. Otherwise the tensor is added to the cache.
     *
     * @note If use_single_image is true, @p name is ignored and the user image
     * is loaded instead.
     */
    const RawTensor &find_or_create_raw_tensor(const std::string &name, Format format, Channel channel) const;

    mutable TensorCache _cache{};
    mutable std::mutex _format_lock{};
    mutable std::mutex _channel_lock{};
    std::string _library_path;
    std::random_device::result_type _seed;
};

template <typename T, typename D>
void TensorLibrary::fill(T &&tensor, D &&distribution, std::random_device::result_type seed_offset) const
{
    Window window;
    for(unsigned int d = 0; d < tensor.shape().num_dimensions(); ++d)
    {
        window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
    }

    std::mt19937 gen(_seed + seed_offset);

    //FIXME: Replace with normal loop
    execute_window_loop(window, [&](const Coordinates & id)
    {
        using ResultType = typename std::remove_reference<D>::type::result_type;
        const ResultType value = distribution(gen);
        void *const out_ptr = tensor(id);
        store_value_with_data_type(out_ptr, value, tensor.data_type());
    });
}

template <typename D>
void TensorLibrary::fill(RawTensor &raw, D &&distribution, std::random_device::result_type seed_offset) const
{
    std::mt19937 gen(_seed + seed_offset);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        using ResultType = typename std::remove_reference<D>::type::result_type;
        const ResultType value = distribution(gen);
        store_value_with_data_type(raw.data() + offset, value, raw.data_type());
    }
}

template <typename T>
void TensorLibrary::fill(T &&tensor, const std::string &name, Format format) const
{
    const RawTensor &raw = get(name, format);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::BufferType *const raw_ptr = raw.data() + offset;
        const auto out_ptr = static_cast<RawTensor::BufferType *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void TensorLibrary::fill(T &&tensor, const std::string &name, Channel channel) const
{
    fill(std::forward<T>(tensor), name, get_format_for_channel(channel), channel);
}

template <typename T>
void TensorLibrary::fill(T &&tensor, const std::string &name, Format format, Channel channel) const
{
    const RawTensor &raw = get(name, format, channel);

    for(size_t offset = 0; offset < raw.size(); offset += raw.element_size())
    {
        const Coordinates id = index2coord(raw.shape(), offset / raw.element_size());

        const RawTensor::BufferType *const raw_ptr = raw.data() + offset;
        const auto out_ptr = static_cast<RawTensor::BufferType *>(tensor(id));
        std::copy_n(raw_ptr, raw.element_size(), out_ptr);
    }
}

template <typename T>
void TensorLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        {
            std::uniform_int_distribution<uint8_t> distribution_u8(std::numeric_limits<uint8_t>::lowest(), std::numeric_limits<uint8_t>::max());
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QS8:
        {
            std::uniform_int_distribution<int8_t> distribution_s8(std::numeric_limits<int8_t>::lowest(), std::numeric_limits<int8_t>::max());
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            std::uniform_int_distribution<uint16_t> distribution_u16(std::numeric_limits<uint16_t>::lowest(), std::numeric_limits<uint16_t>::max());
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        {
            std::uniform_int_distribution<int16_t> distribution_s16(std::numeric_limits<int16_t>::lowest(), std::numeric_limits<int16_t>::max());
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            std::uniform_int_distribution<uint32_t> distribution_u32(std::numeric_limits<uint32_t>::lowest(), std::numeric_limits<uint32_t>::max());
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            std::uniform_int_distribution<int32_t> distribution_s32(std::numeric_limits<int32_t>::lowest(), std::numeric_limits<int32_t>::max());
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            std::uniform_int_distribution<uint64_t> distribution_u64(std::numeric_limits<uint64_t>::lowest(), std::numeric_limits<uint64_t>::max());
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            std::uniform_int_distribution<int64_t> distribution_s64(std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max());
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
#ifdef ENABLE_FP16
        case DataType::F16:
        {
            std::uniform_real_distribution<float16_t> distribution_f16(std::numeric_limits<float16_t>::lowest(), std::numeric_limits<float16_t>::max());
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
#endif /* ENABLE_FP16 */
        case DataType::F32:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<float> distribution_f32(-1000.f, 1000.f);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            // It doesn't make sense to check [-inf, inf], so hard code it to a big number
            std::uniform_real_distribution<double> distribution_f64(-1000.0, 1000.0);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            std::uniform_int_distribution<size_t> distribution_sizet(std::numeric_limits<size_t>::lowest(), std::numeric_limits<size_t>::max());
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T, typename D>
void TensorLibrary::fill_tensor_uniform(T &&tensor, std::random_device::result_type seed_offset, D low, D high) const
{
    switch(tensor.data_type())
    {
        case DataType::U8:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint8_t, D>::value));
            std::uniform_int_distribution<uint8_t> distribution_u8(low, high);
            fill(tensor, distribution_u8, seed_offset);
            break;
        }
        case DataType::S8:
        case DataType::QS8:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int8_t, D>::value));
            std::uniform_int_distribution<int8_t> distribution_s8(low, high);
            fill(tensor, distribution_s8, seed_offset);
            break;
        }
        case DataType::U16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint16_t, D>::value));
            std::uniform_int_distribution<uint16_t> distribution_u16(low, high);
            fill(tensor, distribution_u16, seed_offset);
            break;
        }
        case DataType::S16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int16_t, D>::value));
            std::uniform_int_distribution<int16_t> distribution_s16(low, high);
            fill(tensor, distribution_s16, seed_offset);
            break;
        }
        case DataType::U32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint32_t, D>::value));
            std::uniform_int_distribution<uint32_t> distribution_u32(low, high);
            fill(tensor, distribution_u32, seed_offset);
            break;
        }
        case DataType::S32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int32_t, D>::value));
            std::uniform_int_distribution<int32_t> distribution_s32(low, high);
            fill(tensor, distribution_s32, seed_offset);
            break;
        }
        case DataType::U64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<uint64_t, D>::value));
            std::uniform_int_distribution<uint64_t> distribution_u64(low, high);
            fill(tensor, distribution_u64, seed_offset);
            break;
        }
        case DataType::S64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<int64_t, D>::value));
            std::uniform_int_distribution<int64_t> distribution_s64(low, high);
            fill(tensor, distribution_s64, seed_offset);
            break;
        }
#ifdef ENABLE_FP16
        case DataType::F16:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<float16_t, D>::value));
            std::uniform_real_distribution<float16_t> distribution_f16(low, high);
            fill(tensor, distribution_f16, seed_offset);
            break;
        }
#endif /* ENABLE_FP16 */
        case DataType::F32:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<float, D>::value));
            std::uniform_real_distribution<float> distribution_f32(low, high);
            fill(tensor, distribution_f32, seed_offset);
            break;
        }
        case DataType::F64:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<double, D>::value));
            std::uniform_real_distribution<double> distribution_f64(low, high);
            fill(tensor, distribution_f64, seed_offset);
            break;
        }
        case DataType::SIZET:
        {
            ARM_COMPUTE_ERROR_ON(!(std::is_same<size_t, D>::value));
            std::uniform_int_distribution<size_t> distribution_sizet(low, high);
            fill(tensor, distribution_sizet, seed_offset);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }
}

template <typename T>
void TensorLibrary::fill_layer_data(T &&tensor, std::string name) const
{
#ifdef _WIN32
    const std::string path_separator("\\");
#else
    const std::string path_separator("/");
#endif

    const std::string path = _library_path + path_separator + name;

    // Open file
    std::ifstream file(path, std::ios::in | std::ios::binary);
    if(!file.good())
    {
        throw std::runtime_error("Could not load binary data: " + path);
    }

    Window window;
    for(unsigned int d = 0; d < tensor.shape().num_dimensions(); ++d)
    {
        window.set(d, Window::Dimension(0, tensor.shape()[d], 1));
    }

    //FIXME: Replace with normal loop
    execute_window_loop(window, [&](const Coordinates & id)
    {
        float val;
        file.read(reinterpret_cast<char *>(&val), sizeof(float));
        void *const out_ptr = tensor(id);
        store_value_with_data_type(out_ptr, val, tensor.data_type());
    });
}
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__ */