/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __UTILS_UTILS_H__
#define __UTILS_UTILS_H__

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/Tensor.h"
#include "libnpy/npy.hpp"
#include "support/ToolchainSupport.h"

#ifdef ARM_COMPUTE_CL
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/runtime/CL/CLDistribution1D.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#endif /* ARM_COMPUTE_CL */
#ifdef ARM_COMPUTE_GC
#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
#endif /* ARM_COMPUTE_GC */

#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <tuple>
#include <vector>

namespace arm_compute
{
namespace utils
{
/** Supported image types */
enum class ImageType
{
    UNKNOWN,
    PPM,
    JPEG
};

/** Abstract Example class.
 *
 * All examples have to inherit from this class.
 */
class Example
{
public:
    /** Setup the example.
     *
     * @param[in] argc Argument count.
     * @param[in] argv Argument values.
     *
     * @return True in case of no errors in setup else false
     */
    virtual bool do_setup(int argc, char **argv)
    {
        return true;
    };
    /** Run the example. */
    virtual void do_run() {};
    /** Teardown the example. */
    virtual void do_teardown() {};

    /** Default destructor. */
    virtual ~Example() = default;
};

/** Run an example and handle the potential exceptions it throws
 *
 * @param[in] argc    Number of command line arguments
 * @param[in] argv    Command line arguments
 * @param[in] example Example to run
 *
 * @return The exit code of the example
 */
int run_example(int argc, char **argv, std::unique_ptr<Example> example);

template <typename T>
int run_example(int argc, char **argv)
{
    return run_example(argc, argv, support::cpp14::make_unique<T>());
}

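// Illustrative usage sketch (not part of the library): an application subclasses
// Example and hands control to the templated run_example() overload, which constructs
// the example and forwards argc/argv. The class name "MyExample" and its trivial body
// are hypothetical.
//
//   class MyExample : public arm_compute::utils::Example
//   {
//   public:
//       bool do_setup(int argc, char **argv) override
//       {
//           // Parse arguments, configure functions and allocate tensors here
//           return true;
//       }
//       void do_run() override
//       {
//           // Run the configured functions here
//       }
//   };
//
//   int main(int argc, char **argv)
//   {
//       return arm_compute::utils::run_example<MyExample>(argc, argv);
//   }
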
/** Draw an RGB rectangular window for the detected object
 *
 * @param[in, out] tensor Input tensor on which the rectangle will be drawn. Format supported: RGB888
 * @param[in]      rect   Geometry of the rectangular window
 * @param[in]      r      Red colour to use
 * @param[in]      g      Green colour to use
 * @param[in]      b      Blue colour to use
 */
void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::DetectionWindow &rect, uint8_t r, uint8_t g, uint8_t b);

/** Gets the image type of a file
 *
 * @param[in] filename File whose image type to identify
 *
 * @return Image type
 */
ImageType get_image_type_from_file(const std::string &filename);

/** Parse the ppm header from an input file stream. At the end of the execution,
 * the file position pointer will be located at the first pixel stored in the ppm file
 *
 * @param[in] fs Input file stream to parse
 *
 * @return The width, height and max value stored in the header of the PPM file
 */
std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs);

/** Parse the npy header from an input file stream. At the end of the execution,
 * the file position pointer will be located at the first data element stored in the npy file
 *
 * @param[in] fs Input file stream to parse
 *
 * @return The shape, fortran order flag and typestring stored in the header of the NPY file
 */
std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs);

/** Obtain numpy type string from DataType.
 *
 * @param[in] data_type Data type.
 *
 * @return numpy type string.
 */
inline std::string get_typestring(DataType data_type)
{
    // Check endianness
    const unsigned int i = 1;
    const char        *c = reinterpret_cast<const char *>(&i);
    std::string        endianness;
    if(*c == 1)
    {
        endianness = std::string("<");
    }
    else
    {
        endianness = std::string(">");
    }
    const std::string no_endianness("|");

    switch(data_type)
    {
        case DataType::U8:
        case DataType::QASYMM8:
            return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
        case DataType::S8:
            return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
        case DataType::U16:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
        case DataType::S16:
            return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
        case DataType::U32:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
        case DataType::S32:
            return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
        case DataType::U64:
            return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
        case DataType::S64:
            return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
        case DataType::F16:
            return endianness + "f" + support::cpp11::to_string(sizeof(half));
        case DataType::F32:
            return endianness + "f" + support::cpp11::to_string(sizeof(float));
        case DataType::F64:
            return endianness + "f" + support::cpp11::to_string(sizeof(double));
        case DataType::SIZET:
            return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
        default:
            ARM_COMPUTE_ERROR("Data type not supported");
    }
}

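// Illustrative sketch: on a little-endian host the helper above yields the same dtype
// descriptors that appear in a numpy .npy header, e.g. the ones written by np.save().
//
//   const std::string f32_str = arm_compute::utils::get_typestring(arm_compute::DataType::F32); // "<f4"
//   const std::string u8_str  = arm_compute::utils::get_typestring(arm_compute::DataType::U8);  // "|u1"
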
/** Returns true if the value can be represented by the given data type
 *
 * @param[in] val        value to be checked
 * @param[in] dt         data type that is checked
 * @param[in] quant_info quantization info if the data type is QASYMM8
 *
 * @return true if the data type can hold the value.
 */
template <typename T>
bool check_value_range(T val, DataType dt, QuantizationInfo quant_info = QuantizationInfo())
{
    switch(dt)
    {
        case DataType::U8:
            return ((static_cast<uint8_t>(val) == val) && val >= std::numeric_limits<uint8_t>::lowest() && val <= std::numeric_limits<uint8_t>::max());
        case DataType::QASYMM8:
        {
            double min = static_cast<double>(quant_info.dequantize(0));
            double max = static_cast<double>(quant_info.dequantize(std::numeric_limits<uint8_t>::max()));
            return ((double)val >= min && (double)val <= max);
        }
        case DataType::S8:
            return ((static_cast<int8_t>(val) == val) && val >= std::numeric_limits<int8_t>::lowest() && val <= std::numeric_limits<int8_t>::max());
        case DataType::U16:
            return ((static_cast<uint16_t>(val) == val) && val >= std::numeric_limits<uint16_t>::lowest() && val <= std::numeric_limits<uint16_t>::max());
        case DataType::S16:
            return ((static_cast<int16_t>(val) == val) && val >= std::numeric_limits<int16_t>::lowest() && val <= std::numeric_limits<int16_t>::max());
        case DataType::U32:
            return ((static_cast<uint32_t>(val) == val) && val >= std::numeric_limits<uint32_t>::lowest() && val <= std::numeric_limits<uint32_t>::max());
        case DataType::S32:
            return ((static_cast<int32_t>(val) == val) && val >= std::numeric_limits<int32_t>::lowest() && val <= std::numeric_limits<int32_t>::max());
        case DataType::U64:
            return (val >= std::numeric_limits<uint64_t>::lowest() && val <= std::numeric_limits<uint64_t>::max());
        case DataType::S64:
            return (val >= std::numeric_limits<int64_t>::lowest() && val <= std::numeric_limits<int64_t>::max());
        case DataType::F16:
            return (val >= std::numeric_limits<half>::lowest() && val <= std::numeric_limits<half>::max());
        case DataType::F32:
            return (val >= std::numeric_limits<float>::lowest() && val <= std::numeric_limits<float>::max());
        case DataType::F64:
            return (val >= std::numeric_limits<double>::lowest() && val <= std::numeric_limits<double>::max());
        case DataType::SIZET:
            return ((static_cast<size_t>(val) == val) && val >= std::numeric_limits<size_t>::lowest() && val <= std::numeric_limits<size_t>::max());
        default:
            ARM_COMPUTE_ERROR("Data type not supported");
            return false;
    }
}

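// Illustrative sketch: checking whether candidate values fit a target data type before
// writing them into a tensor. The values used here are arbitrary.
//
//   const bool fits_u8  = arm_compute::utils::check_value_range(300, arm_compute::DataType::U8);  // false: 300 does not fit in 8 bits
//   const bool fits_s16 = arm_compute::utils::check_value_range(300, arm_compute::DataType::S16); // true
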
/** Maps a tensor if needed
 *
 * @param[in] tensor   Tensor to be mapped
 * @param[in] blocking Specifies if the mapping should be blocking or not
 */
template <typename T>
inline void map(T &tensor, bool blocking)
{
    ARM_COMPUTE_UNUSED(tensor);
    ARM_COMPUTE_UNUSED(blocking);
}

/** Unmaps a tensor if needed
 *
 * @param tensor Tensor to be unmapped
 */
template <typename T>
inline void unmap(T &tensor)
{
    ARM_COMPUTE_UNUSED(tensor);
}

#ifdef ARM_COMPUTE_CL
/** Maps a tensor if needed
 *
 * @param[in] tensor   Tensor to be mapped
 * @param[in] blocking Specifies if the mapping should be blocking or not
 */
inline void map(CLTensor &tensor, bool blocking)
{
    tensor.map(blocking);
}

/** Unmaps a tensor if needed
 *
 * @param tensor Tensor to be unmapped
 */
inline void unmap(CLTensor &tensor)
{
    tensor.unmap();
}

/** Maps a distribution if needed
 *
 * @param[in] distribution Distribution to be mapped
 * @param[in] blocking     Specifies if the mapping should be blocking or not
 */
inline void map(CLDistribution1D &distribution, bool blocking)
{
    distribution.map(blocking);
}

/** Unmaps a distribution if needed
 *
 * @param distribution Distribution to be unmapped
 */
inline void unmap(CLDistribution1D &distribution)
{
    distribution.unmap();
}
#endif /* ARM_COMPUTE_CL */

#ifdef ARM_COMPUTE_GC
/** Maps a tensor if needed
 *
 * @param[in] tensor   Tensor to be mapped
 * @param[in] blocking Specifies if the mapping should be blocking or not
 */
inline void map(GCTensor &tensor, bool blocking)
{
    tensor.map(blocking);
}

/** Unmaps a tensor if needed
 *
 * @param tensor Tensor to be unmapped
 */
inline void unmap(GCTensor &tensor)
{
    tensor.unmap();
}
#endif /* ARM_COMPUTE_GC */

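// Illustrative sketch: the map()/unmap() overloads above let backend-agnostic helpers
// touch a tensor's buffer from the host. For a plain Tensor they are no-ops; for a
// CLTensor (or GCTensor) they map/unmap the device buffer. "cl_tensor" is a
// hypothetical, already-allocated CLTensor.
//
//   arm_compute::utils::map(cl_tensor, true /* blocking */);
//   std::memset(cl_tensor.buffer(), 0, cl_tensor.info()->total_size());
//   arm_compute::utils::unmap(cl_tensor);
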
/** Specialized class to generate random non-zero FP16 values.
 *  uniform_real_distribution<half> generates values that get rounded off to zero, causing
 *  differences between ACL and the reference implementation.
 */
class uniform_real_distribution_fp16
{
    half                                   min{ 0.0f }, max{ 0.0f };
    std::uniform_real_distribution<float>  neg{ min, -0.3f };
    std::uniform_real_distribution<float>  pos{ 0.3f, max };
    std::uniform_int_distribution<uint8_t> sign_picker{ 0, 1 };

public:
    using result_type = half;
    /** Constructor
     *
     * @param[in] a Minimum value of the distribution
     * @param[in] b Maximum value of the distribution
     */
    explicit uniform_real_distribution_fp16(half a = half(0.0), half b = half(1.0))
        : min(a), max(b)
    {
    }

    /** () operator to generate the next value
     *
     * @param[in] gen A uniform random bit generator object
     */
    half operator()(std::mt19937 &gen)
    {
        if(sign_picker(gen))
        {
            return (half)neg(gen);
        }
        return (half)pos(gen);
    }
};

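// Illustrative sketch: drawing non-zero half-precision values, e.g. to randomise a
// tensor that will be compared against a reference implementation.
//
//   std::mt19937 gen(0);
//   arm_compute::utils::uniform_real_distribution_fp16 dist(half(-1.0f), half(1.0f));
//   const half sample = dist(gen); // falls in [-1, -0.3] or [0.3, 1], never near zero
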
/** Numpy data loader */
class NPYLoader
{
public:
    /** Default constructor */
    NPYLoader()
        : _fs(), _shape(), _fortran_order(false), _typestring(), _file_layout(DataLayout::NCHW)
    {
    }

    /** Open a NPY file and read its metadata
     *
     * @param[in] npy_filename File to open
     * @param[in] file_layout  (Optional) Layout in which the weights are stored in the file.
     */
    void open(const std::string &npy_filename, DataLayout file_layout = DataLayout::NCHW)
    {
        ARM_COMPUTE_ERROR_ON(is_open());
        try
        {
            _fs.open(npy_filename, std::ios::in | std::ios::binary);
            ARM_COMPUTE_EXIT_ON_MSG(!_fs.good(), "Failed to load binary data from %s", npy_filename.c_str());
            _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
            _file_layout = file_layout;

            std::tie(_shape, _fortran_order, _typestring) = parse_npy_header(_fs);
        }
        catch(const std::ifstream::failure &e)
        {
            ARM_COMPUTE_ERROR("Accessing %s: %s", npy_filename.c_str(), e.what());
        }
    }
    /** Return true if a NPY file is currently open */
    bool is_open()
    {
        return _fs.is_open();
    }

    /** Return true if a NPY file is in fortran order */
    bool is_fortran()
    {
        return _fortran_order;
    }

    /** Initialise the tensor's metadata with the dimensions of the NPY file currently open
     *
     * @param[out] tensor Tensor to initialise
     * @param[in]  dt     Data type to use for the tensor
     */
    template <typename T>
    void init_tensor(T &tensor, arm_compute::DataType dt)
    {
        ARM_COMPUTE_ERROR_ON(!is_open());
        ARM_COMPUTE_ERROR_ON(dt != arm_compute::DataType::F32);

        // Use the size of the input NPY tensor
        TensorShape shape;
        shape.set_num_dimensions(_shape.size());
        for(size_t i = 0; i < _shape.size(); ++i)
        {
            size_t src = i;
            if(_fortran_order)
            {
                src = _shape.size() - 1 - i;
            }
            shape.set(i, _shape.at(src));
        }

        arm_compute::TensorInfo tensor_info(shape, 1, dt);
        tensor.allocator()->init(tensor_info);
    }

    /** Fill a tensor with the content of the currently open NPY file.
     *
     * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
     *
     * @param[in,out] tensor Tensor to fill (Must be allocated, and of matching dimensions with the opened NPY).
     */
    template <typename T>
    void fill_tensor(T &tensor)
    {
        ARM_COMPUTE_ERROR_ON(!is_open());
        ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::QASYMM8, arm_compute::DataType::S32, arm_compute::DataType::F32, arm_compute::DataType::F16);
        try
        {
            // Map buffer if creating a CLTensor
            map(tensor, true);

            // Check if the file is large enough to fill the tensor
            const size_t current_position = _fs.tellg();
            _fs.seekg(0, std::ios_base::end);
            const size_t end_position = _fs.tellg();
            _fs.seekg(current_position, std::ios_base::beg);

            ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(),
                                     "Not enough data in file");
            ARM_COMPUTE_UNUSED(end_position);

            // Check if the typestring matches the given one
            std::string expect_typestr = get_typestring(tensor.info()->data_type());
            ARM_COMPUTE_ERROR_ON_MSG(_typestring != expect_typestr, "Typestrings mismatch");

            bool are_layouts_different = (_file_layout != tensor.info()->data_layout());
            // Correct dimensions (Needs to match TensorShape dimension corrections)
            if(_shape.size() != tensor.info()->tensor_shape().num_dimensions())
            {
                for(int i = static_cast<int>(_shape.size()) - 1; i > 0; --i)
                {
                    if(_shape[i] == 1)
                    {
                        _shape.pop_back();
                    }
                    else
                    {
                        break;
                    }
                }
            }

            TensorShape                    permuted_shape = tensor.info()->tensor_shape();
            arm_compute::PermutationVector perm;
            if(are_layouts_different && tensor.info()->tensor_shape().num_dimensions() > 2)
            {
                perm                                    = (tensor.info()->data_layout() == arm_compute::DataLayout::NHWC) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);
                arm_compute::PermutationVector perm_vec = (tensor.info()->data_layout() == arm_compute::DataLayout::NCHW) ? arm_compute::PermutationVector(2U, 0U, 1U) : arm_compute::PermutationVector(1U, 2U, 0U);

                arm_compute::permute(permuted_shape, perm_vec);
            }

            // Validate tensor shape
            ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.info()->tensor_shape().num_dimensions(), "Tensor ranks mismatch");
            for(size_t i = 0; i < _shape.size(); ++i)
            {
                ARM_COMPUTE_ERROR_ON_MSG(permuted_shape[i] != _shape[i], "Tensor dimensions mismatch");
            }

            switch(tensor.info()->data_type())
            {
                case arm_compute::DataType::QASYMM8:
                case arm_compute::DataType::S32:
                case arm_compute::DataType::F32:
                case arm_compute::DataType::F16:
                {
                    // Read data
                    if(!are_layouts_different && !_fortran_order && tensor.info()->padding().empty())
                    {
                        // If the tensor has no padding, read directly from the stream.
                        _fs.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size());
                    }
                    else
                    {
                        // If the tensor has padding or is in fortran order, access the tensor elements through an execution window.
                        Window             window;
                        const unsigned int num_dims = _shape.size();
                        if(_fortran_order)
                        {
                            for(unsigned int dim = 0; dim < num_dims; dim++)
                            {
                                permuted_shape.set(dim, _shape[num_dims - dim - 1]);
                                perm.set(dim, num_dims - dim - 1);
                            }
                            if(are_layouts_different)
                            {
                                // Permute only if num_dimensions greater than 2
                                if(num_dims > 2)
                                {
                                    if(_file_layout == DataLayout::NHWC) // i.e. destination is NCHW --> permute(1,2,0)
                                    {
                                        arm_compute::permute(perm, arm_compute::PermutationVector(1U, 2U, 0U));
                                    }
                                    else
                                    {
                                        arm_compute::permute(perm, arm_compute::PermutationVector(2U, 0U, 1U));
                                    }
                                }
                            }
                        }
                        window.use_tensor_dimensions(permuted_shape);

                        execute_window_loop(window, [&](const Coordinates & id)
                        {
                            Coordinates dst(id);
                            arm_compute::permute(dst, perm);
                            _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(dst)), tensor.info()->element_size());
                        });
                    }

                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("Unsupported data type");
            }

            // Unmap buffer if creating a CLTensor
            unmap(tensor);
        }
        catch(const std::ifstream::failure &e)
        {
            ARM_COMPUTE_ERROR("Loading NPY file: %s", e.what());
        }
    }

private:
    std::ifstream              _fs;
    std::vector<unsigned long> _shape;
    bool                       _fortran_order;
    std::string                _typestring;
    DataLayout                 _file_layout;
};

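// Illustrative usage sketch: loading F32 data from a numpy file into a tensor. The
// filename is hypothetical; the shape is deduced from the .npy header and
// fill_tensor() checks that the allocated tensor matches it.
//
//   arm_compute::Tensor           weights;
//   arm_compute::utils::NPYLoader loader;
//   loader.open("weights.npy");
//   loader.init_tensor(weights, arm_compute::DataType::F32);
//   weights.allocator()->allocate();
//   loader.fill_tensor(weights);
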
/** Template helper function to save a tensor image to a PPM file.
 *
 * @note Only U8 and RGB888 formats supported.
 * @note Only works with 2D tensors.
 * @note If the input tensor is a CLTensor, the function maps and unmaps the image
 *
 * @param[in] tensor       The tensor to save as PPM file
 * @param[in] ppm_filename Filename of the file to create.
 */
template <typename T>
void save_to_ppm(T &tensor, const std::string &ppm_filename)
{
    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&tensor, arm_compute::Format::RGB888, arm_compute::Format::U8);
    ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);

    std::ofstream fs;

    try
    {
        fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
        fs.open(ppm_filename, std::ios::out | std::ios::binary);

        const unsigned int width  = tensor.info()->tensor_shape()[0];
        const unsigned int height = tensor.info()->tensor_shape()[1];

        fs << "P6\n"
           << width << " " << height << " 255\n";

        // Map buffer if creating a CLTensor/GCTensor
        map(tensor, true);

        switch(tensor.info()->format())
        {
            case arm_compute::Format::U8:
            {
                arm_compute::Window window;
                window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
                window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));

                arm_compute::Iterator in(&tensor, window);

                arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                {
                    const unsigned char value = *in.ptr();

                    fs << value << value << value;
                },
                in);

                break;
            }
            case arm_compute::Format::RGB888:
            {
                arm_compute::Window window;
                window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, width));
                window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));

                arm_compute::Iterator in(&tensor, window);

                arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                {
                    fs.write(reinterpret_cast<std::fstream::char_type *>(in.ptr()), width * tensor.info()->element_size());
                },
                in);

                break;
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported format");
        }

        // Unmap buffer if creating a CLTensor/GCTensor
        unmap(tensor);
    }
    catch(const std::ofstream::failure &e)
    {
        ARM_COMPUTE_ERROR("Writing %s: (%s)", ppm_filename.c_str(), e.what());
    }
}

/** Template helper function to save a tensor image to a NPY file.
 *
 * @note Only F32 data type supported.
 * @note If the input tensor is a CLTensor, the function maps and unmaps the image
 *
 * @param[in] tensor        The tensor to save as NPY file
 * @param[in] npy_filename  Filename of the file to create.
 * @param[in] fortran_order If true, save matrix in fortran order.
 */
template <typename T>
void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);

    std::ofstream fs;
    try
    {
        fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
        fs.open(npy_filename, std::ios::out | std::ios::binary);

        std::vector<npy::ndarray_len_t> shape(tensor.info()->num_dimensions());

        for(unsigned int i = 0, j = tensor.info()->num_dimensions() - 1; i < tensor.info()->num_dimensions(); ++i, --j)
        {
            shape[i] = tensor.info()->tensor_shape()[!fortran_order ? j : i];
        }

        // Map buffer if creating a CLTensor
        map(tensor, true);

        switch(tensor.info()->data_type())
        {
            case arm_compute::DataType::F32:
            {
                std::vector<float> tmp; /* Used only to get the typestring */
                npy::Typestring    typestring_o{ tmp };
                std::string        typestring = typestring_o.str();

                std::ofstream stream(npy_filename, std::ofstream::binary);
                npy::write_header(stream, typestring, fortran_order, shape);

                arm_compute::Window window;
                window.use_tensor_dimensions(tensor.info()->tensor_shape());

                arm_compute::Iterator in(&tensor, window);

                arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
                {
                    stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
                },
                in);

                break;
            }
            default:
                ARM_COMPUTE_ERROR("Unsupported format");
        }

        // Unmap buffer if creating a CLTensor
        unmap(tensor);
    }
    catch(const std::ofstream::failure &e)
    {
        ARM_COMPUTE_ERROR("Writing %s: (%s)", npy_filename.c_str(), e.what());
    }
}

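// Illustrative sketch: dumping an allocated and filled F32 tensor to disk so it can be
// inspected with numpy (np.load). The tensor name and filename are hypothetical.
//
//   arm_compute::utils::save_to_npy(dst_tensor, "dst.npy", false /* fortran_order */);
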
/** Load the tensor with pre-trained data from a binary file
 *
 * @param[in,out] tensor   The tensor to be filled. Data type supported: F32.
 * @param[in]     filename Filename of the binary file to load from.
 */
template <typename T>
void load_trained_data(T &tensor, const std::string &filename)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);

    std::ifstream fs;

    try
    {
        fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
        // Open file
        fs.open(filename, std::ios::in | std::ios::binary);

        if(!fs.good())
        {
            throw std::runtime_error("Could not load binary data: " + filename);
        }

        // Map buffer if creating a CLTensor/GCTensor
        map(tensor, true);

        Window window;

        window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, 1, 1));

        for(unsigned int d = 1; d < tensor.info()->num_dimensions(); ++d)
        {
            window.set(d, Window::Dimension(0, tensor.info()->tensor_shape()[d], 1));
        }

        arm_compute::Iterator in(&tensor, window);

        execute_window_loop(window, [&](const Coordinates & id)
        {
            fs.read(reinterpret_cast<std::fstream::char_type *>(in.ptr()), tensor.info()->tensor_shape()[0] * tensor.info()->element_size());
        },
        in);

        // Unmap buffer if creating a CLTensor/GCTensor
        unmap(tensor);
    }
    catch(const std::ofstream::failure &e)
    {
        ARM_COMPUTE_ERROR("Reading %s: (%s)", filename.c_str(), e.what());
    }
}

/** Fill a tensor with uniformly distributed random values
 *
 * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
 *
 * @param[in,out] tensor      The tensor to be filled. Data types supported: F16/F32.
 * @param[in]     lower_bound Lower bound of the distribution
 * @param[in]     upper_bound Upper bound of the distribution
 */
template <typename T>
void fill_random_tensor(T &tensor, float lower_bound, float upper_bound)
{
    std::random_device rd;
    std::mt19937       gen(rd());

    Window window;
    window.use_tensor_dimensions(tensor.info()->tensor_shape());

    map(tensor, true);

    Iterator it(&tensor, window);

    switch(tensor.info()->data_type())
    {
        case arm_compute::DataType::F16:
        {
            std::uniform_real_distribution<float> dist(lower_bound, upper_bound);

            execute_window_loop(window, [&](const Coordinates & id)
            {
                *reinterpret_cast<half *>(it.ptr()) = (half)dist(gen);
            },
            it);

            break;
        }
        case arm_compute::DataType::F32:
        {
            std::uniform_real_distribution<float> dist(lower_bound, upper_bound);

            execute_window_loop(window, [&](const Coordinates & id)
            {
                *reinterpret_cast<float *>(it.ptr()) = dist(gen);
            },
            it);

            break;
        }
        default:
        {
            ARM_COMPUTE_ERROR("Unsupported format");
        }
    }

    unmap(tensor);
}

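// Illustrative sketch: randomising an already-allocated F32 tensor, e.g. to feed a
// benchmark with non-constant input. "src" is a hypothetical tensor.
//
//   arm_compute::utils::fill_random_tensor(src, -1.f, 1.f);
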
/** Initialise the metadata of an SGEMM output tensor from the two input tensors
 *
 * @param[out] dst  Output tensor to initialise (shape: src1 columns x src0 rows)
 * @param[in]  src0 Left-hand side input tensor
 * @param[in]  src1 Right-hand side input tensor
 * @param[in]  dt   Data type to use for the output tensor
 */
template <typename T>
void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::DataType dt)
{
    dst.allocator()->init(TensorInfo(TensorShape(src1.info()->dimension(0), src0.info()->dimension(1)), 1, dt));
}
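// Illustrative sketch: sizing the output of a GEMM C = A * B, where dst takes src1's
// width (columns of B) and src0's height (rows of A). The tensors are hypothetical and
// must have been initialised beforehand.
//
//   arm_compute::utils::init_sgemm_output(dst, src0, src1, arm_compute::DataType::F32);
//   dst.allocator()->allocate();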
/** This function returns the amount of free memory, reading from /proc/meminfo
 *
 * @return The free memory in kB
 */
uint64_t get_mem_free_from_meminfo();

/** Compare two tensors
 *
 * @param[in] tensor1 First tensor to be compared.
 * @param[in] tensor2 Second tensor to be compared.
 *
 * @return The number of mismatches
 */
template <typename T>
int compare_tensor(ITensor &tensor1, ITensor &tensor2)
{
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(&tensor1, &tensor2);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(&tensor1, &tensor2);

    int    num_mismatches = 0;
    Window window;
    window.use_tensor_dimensions(tensor1.info()->tensor_shape());

    map(tensor1, true);
    map(tensor2, true);

    Iterator itensor1(&tensor1, window);
    Iterator itensor2(&tensor2, window);

    execute_window_loop(window, [&](const Coordinates & id)
    {
        if(std::abs(*reinterpret_cast<T *>(itensor1.ptr()) - *reinterpret_cast<T *>(itensor2.ptr())) > 0.0001)
        {
            ++num_mismatches;
        }
    },
    itensor1, itensor2);

    unmap(tensor1);
    unmap(tensor2);

    return num_mismatches;
}
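// Illustrative sketch: counting element mismatches between a computed tensor and a
// reference tensor with the same shape and data type; both tensors are hypothetical
// and must be filled before the call.
//
//   const int mismatches = arm_compute::utils::compare_tensor<float>(computed, reference);
//   std::cout << mismatches << " mismatching elements\n";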
} // namespace utils
} // namespace arm_compute
#endif /* __UTILS_UTILS_H__ */