/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_TEST_SIMPLE_TENSOR_H__
#define __ARM_COMPUTE_TEST_SIMPLE_TENSOR_H__

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "support/ToolchainSupport.h"
#include "tests/IAccessor.h"
#include "tests/Utils.h"

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <numeric>
#include <stdexcept>
#include <utility>

namespace arm_compute
{
namespace test
{
class RawTensor;

/** Simple tensor object that stores elements in a consecutive chunk of memory.
 *
 * It can be created either by loading an image from a file, which also
 * initialises the content of the tensor, or by explicitly specifying the size.
 * The latter leaves the content uninitialised.
 *
 * Furthermore, the class provides methods to convert the tensor's values into
 * different image formats.
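 *
 * A minimal usage sketch (illustrative only; the shape and values are arbitrary):
 * @code
 * SimpleTensor<float> tensor(TensorShape(8U, 4U), DataType::F32);
 * tensor[0] = 1.f;                                                   // linear element access
 * float v = *static_cast<float *>(tensor(Coordinates(2, 1)));        // coordinate-based access
 * @endcode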
 */
template <typename T>
class SimpleTensor : public IAccessor
{
public:
    /** Create an uninitialised tensor. */
    SimpleTensor() = default;

    /** Create an uninitialised tensor of the given @p shape and @p format.
     *
     * @param[in] shape  Shape of the new tensor.
     * @param[in] format Format of the new tensor.
     */
    SimpleTensor(TensorShape shape, Format format);

    /** Create an uninitialised tensor of the given @p shape and @p data type.
     *
     * @param[in] shape             Shape of the new tensor.
     * @param[in] data_type         Data type of the new tensor.
     * @param[in] num_channels      (Optional) Number of channels (default = 1).
     * @param[in] quantization_info (Optional) Quantization info for asymmetric quantization (default = empty).
     * @param[in] data_layout       (Optional) Data layout of the tensor (default = NCHW).
     */
    SimpleTensor(TensorShape shape, DataType data_type,
                 int              num_channels      = 1,
                 QuantizationInfo quantization_info = QuantizationInfo(),
                 DataLayout       data_layout       = DataLayout::NCHW);

    /** Create a deep copy of the given @p tensor.
     *
     * @param[in] tensor Tensor to be copied.
     */
    SimpleTensor(const SimpleTensor &tensor);

    /** Create a deep copy of the given @p tensor.
     *
     * @param[in] tensor Tensor to be copied.
     *
     * @return a copy of the given tensor.
     */
    SimpleTensor &operator=(SimpleTensor tensor);
    /** Allow instances of this class to be move constructed */
    SimpleTensor(SimpleTensor &&) = default;
    /** Default destructor. */
    ~SimpleTensor() = default;

    /** Tensor value type */
    using value_type = T;
    /** Tensor buffer pointer type */
    using Buffer = std::unique_ptr<value_type[]>;

    friend class RawTensor;

    /** Return value at @p offset in the buffer.
     *
     * @param[in] offset Offset within the buffer.
     *
     * @return value in the buffer.
     */
    T &operator[](size_t offset);

    /** Return constant value at @p offset in the buffer.
     *
     * @param[in] offset Offset within the buffer.
     *
     * @return constant value in the buffer.
     */
    const T &operator[](size_t offset) const;

    /** Shape of the tensor.
     *
     * @return the shape of the tensor.
     */
    TensorShape shape() const override;
    /** Size of each element in the tensor in bytes.
     *
     * @return the size of each element in the tensor in bytes.
     */
    size_t element_size() const override;
    /** Total size of the tensor in bytes.
     *
     * @return the total size of the tensor in bytes.
     */
    size_t size() const override;
    /** Image format of the tensor.
     *
     * @return the format of the tensor.
     */
    Format format() const override;
    /** Data layout of the tensor.
     *
     * @return the data layout of the tensor.
     */
    DataLayout data_layout() const override;
    /** Data type of the tensor.
     *
     * @return the data type of the tensor.
     */
    DataType data_type() const override;
    /** Number of channels of the tensor.
     *
     * @return the number of channels of the tensor.
     */
    int num_channels() const override;
    /** Number of elements of the tensor.
     *
     * @return the number of elements of the tensor.
     */
    int num_elements() const override;
    /** Available padding around the tensor.
     *
     * @return the available padding around the tensor.
     */
    PaddingSize padding() const override;
    /** Quantization info in case of asymmetric quantized type.
     *
     * @return the quantization info of the tensor.
     */
    QuantizationInfo quantization_info() const override;

    /** Constant pointer to the underlying buffer.
     *
     * @return a constant pointer to the data.
     */
    const T *data() const;

    /** Pointer to the underlying buffer.
     *
     * @return a pointer to the data.
     */
    T *data();

    /** Read only access to the specified element.
     *
     * @param[in] coord Coordinates of the desired element.
     *
     * @return A pointer to the desired element.
     */
    const void *operator()(const Coordinates &coord) const override;

    /** Access to the specified element.
     *
     * @param[in] coord Coordinates of the desired element.
     *
     * @return A pointer to the desired element.
     */
    void *operator()(const Coordinates &coord) override;

    /** Swaps the content of the provided tensors.
     *
     * @param[in, out] tensor1 Tensor to be swapped.
     * @param[in, out] tensor2 Tensor to be swapped.
     */
    template <typename U>
    friend void swap(SimpleTensor<U> &tensor1, SimpleTensor<U> &tensor2);

protected:
    Buffer           _buffer{ nullptr };
    TensorShape      _shape{};
    Format           _format{ Format::UNKNOWN };
    DataType         _data_type{ DataType::UNKNOWN };
    int              _num_channels{ 0 };
    QuantizationInfo _quantization_info{};
    DataLayout       _data_layout{ DataLayout::UNKNOWN };
};

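// copy_tensor() creates a deep copy of a SimpleTensor while converting every element
// from T2 to T1. The generic overload below casts element by element; the two
// specialised overloads that follow copy the raw buffer when the element types match
// and use half_float's half_cast when either type is half.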
template <typename T1, typename T2>
SimpleTensor<T1> copy_tensor(const SimpleTensor<T2> &tensor)
{
    SimpleTensor<T1> st(tensor.shape(), tensor.data_type(),
                        tensor.num_channels(),
                        tensor.quantization_info(),
                        tensor.data_layout());
    for(size_t n = 0; n < size_t(st.num_elements()); n++)
    {
        st.data()[n] = static_cast<T1>(tensor.data()[n]);
    }
    return st;
}

template <typename T1, typename T2, typename std::enable_if<std::is_same<T1, T2>::value, int>::type = 0>
SimpleTensor<T1> copy_tensor(const SimpleTensor<half> &tensor)
{
    SimpleTensor<T1> st(tensor.shape(), tensor.data_type(),
                        tensor.num_channels(),
                        tensor.quantization_info(),
                        tensor.data_layout());
    memcpy((void *)st.data(), (const void *)tensor.data(), size_t(st.num_elements() * sizeof(T1)));
    return st;
}

template < typename T1, typename T2, typename std::enable_if < (std::is_same<T1, half>::value || std::is_same<T2, half>::value), int >::type = 0 >
SimpleTensor<T1> copy_tensor(const SimpleTensor<half> &tensor)
{
    SimpleTensor<T1> st(tensor.shape(), tensor.data_type(),
                        tensor.num_channels(),
                        tensor.quantization_info(),
                        tensor.data_layout());
    for(size_t n = 0; n < size_t(st.num_elements()); n++)
    {
        st.data()[n] = half_float::detail::half_cast<T1, T2>(tensor.data()[n]);
    }
    return st;
}
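
// Illustrative usage only (a sketch, not part of the tested interface):
//   SimpleTensor<half>  src(TensorShape(16U), DataType::F16);
//   SimpleTensor<float> dst = copy_tensor<float, half>(src); // element-wise half -> float conversion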

template <typename T>
SimpleTensor<T>::SimpleTensor(TensorShape shape, Format format)
    : _buffer(nullptr),
      _shape(shape),
      _format(format),
      _quantization_info(),
      _data_layout(DataLayout::NCHW)
{
    _num_channels = num_channels();
    _buffer       = support::cpp14::make_unique<T[]>(num_elements() * _num_channels);
}

template <typename T>
SimpleTensor<T>::SimpleTensor(TensorShape shape, DataType data_type, int num_channels, QuantizationInfo quantization_info, DataLayout data_layout)
    : _buffer(nullptr),
      _shape(shape),
      _data_type(data_type),
      _num_channels(num_channels),
      _quantization_info(quantization_info),
      _data_layout(data_layout)
{
    _buffer = support::cpp14::make_unique<T[]>(this->_shape.total_size() * _num_channels);
}

template <typename T>
SimpleTensor<T>::SimpleTensor(const SimpleTensor &tensor)
    : _buffer(nullptr),
      _shape(tensor.shape()),
      _format(tensor.format()),
      _data_type(tensor.data_type()),
      _num_channels(tensor.num_channels()),
      _quantization_info(tensor.quantization_info()),
      _data_layout(tensor.data_layout())
{
    _buffer = support::cpp14::make_unique<T[]>(tensor.num_elements() * _num_channels);
    std::copy_n(tensor.data(), this->_shape.total_size() * _num_channels, _buffer.get());
}

template <typename T>
SimpleTensor<T> &SimpleTensor<T>::operator=(SimpleTensor tensor)
{
    swap(*this, tensor);

    return *this;
}

template <typename T>
T &SimpleTensor<T>::operator[](size_t offset)
{
    return _buffer[offset];
}

template <typename T>
const T &SimpleTensor<T>::operator[](size_t offset) const
{
    return _buffer[offset];
}

template <typename T>
TensorShape SimpleTensor<T>::shape() const
{
    return _shape;
}

template <typename T>
size_t SimpleTensor<T>::element_size() const
{
    return num_channels() * element_size_from_data_type(data_type());
}

template <typename T>
QuantizationInfo SimpleTensor<T>::quantization_info() const
{
    return _quantization_info;
}

template <typename T>
size_t SimpleTensor<T>::size() const
{
    const size_t size = std::accumulate(_shape.cbegin(), _shape.cend(), 1, std::multiplies<size_t>());
    return size * element_size();
}

template <typename T>
Format SimpleTensor<T>::format() const
{
    return _format;
}

template <typename T>
DataLayout SimpleTensor<T>::data_layout() const
{
    return _data_layout;
}

template <typename T>
DataType SimpleTensor<T>::data_type() const
{
    if(_format != Format::UNKNOWN)
    {
        return data_type_from_format(_format);
    }
    else
    {
        return _data_type;
    }
}

template <typename T>
int SimpleTensor<T>::num_channels() const
{
    switch(_format)
    {
        case Format::U8:
        case Format::U16:
        case Format::S16:
        case Format::U32:
        case Format::S32:
        case Format::F16:
        case Format::F32:
            return 1;
        // Because the U and V channels are subsampled,
        // these formats appear to have only 2 channels:
        case Format::YUYV422:
        case Format::UYVY422:
            return 2;
        case Format::UV88:
            return 2;
        case Format::RGB888:
            return 3;
        case Format::RGBA8888:
            return 4;
        case Format::UNKNOWN:
            return _num_channels;
        // Doesn't make sense for planar formats:
        case Format::NV12:
        case Format::NV21:
        case Format::IYUV:
        case Format::YUV444:
        default:
            return 0;
    }
}

template <typename T>
int SimpleTensor<T>::num_elements() const
{
    return _shape.total_size();
}

template <typename T>
PaddingSize SimpleTensor<T>::padding() const
{
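    // Reference tensors are tightly packed: no padding is ever added around the buffer.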
    return PaddingSize(0);
}

template <typename T>
const T *SimpleTensor<T>::data() const
{
    return _buffer.get();
}

template <typename T>
T *SimpleTensor<T>::data()
{
    return _buffer.get();
}

template <typename T>
const void *SimpleTensor<T>::operator()(const Coordinates &coord) const
{
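    // coord2index() linearises the N-dimensional coordinate; scaling by the number of
    // channels gives the offset of the element's first channel in the buffer.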
    return _buffer.get() + coord2index(_shape, coord) * _num_channels;
}

template <typename T>
void *SimpleTensor<T>::operator()(const Coordinates &coord)
{
    return _buffer.get() + coord2index(_shape, coord) * _num_channels;
}

template <typename U>
void swap(SimpleTensor<U> &tensor1, SimpleTensor<U> &tensor2)
{
    // Use unqualified call to swap to enable ADL. But make std::swap available
    // as backup.
    using std::swap;
    swap(tensor1._shape, tensor2._shape);
    swap(tensor1._format, tensor2._format);
    swap(tensor1._data_type, tensor2._data_type);
    swap(tensor1._num_channels, tensor2._num_channels);
    swap(tensor1._quantization_info, tensor2._quantization_info);
    swap(tensor1._data_layout, tensor2._data_layout);
    swap(tensor1._buffer, tensor2._buffer);
}
} // namespace test
} // namespace arm_compute
#endif /* __ARM_COMPUTE_TEST_SIMPLE_TENSOR_H__ */