blob: e1b80b874ab72026890fa87d5e23ef73c1a6f46a [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5#pragma once
6
Derek Lambertif674aa02019-08-01 15:56:25 +01007#include <armnn/MemorySources.hpp>
8
telsoa014fcda012018-03-09 14:13:49 +00009namespace armnn
10{
11
telsoa01c577f2c2018-08-31 09:22:23 +010012class TensorShape;
13
telsoa014fcda012018-03-09 14:13:49 +000014class ITensorHandle
15{
16public:
telsoa014fcda012018-03-09 14:13:49 +000017 virtual ~ITensorHandle(){}
telsoa01c577f2c2018-08-31 09:22:23 +010018
19 /// Indicate to the memory manager that this resource is active.
20 /// This is used to compute overlapping lifetimes of resources.
21 virtual void Manage() = 0;
22
23 /// Indicate to the memory manager that this resource is no longer active.
24 /// This is used to compute overlapping lifetimes of resources.
telsoa014fcda012018-03-09 14:13:49 +000025 virtual void Allocate() = 0;
telsoa01c577f2c2018-08-31 09:22:23 +010026
telsoa01c577f2c2018-08-31 09:22:23 +010027 /// Get the parent tensor if this is a subtensor.
28 /// \return a pointer to the parent tensor. Otherwise nullptr if not a subtensor.
29 virtual ITensorHandle* GetParent() const = 0;
30
31 /// Map the tensor data for access.
32 /// \param blocking hint to block the calling thread until all other accesses are complete. (backend dependent)
33 /// \return pointer to the first element of the mapped data.
34 virtual const void* Map(bool blocking=true) const = 0;
35
36 /// Unmap the tensor data
37 virtual void Unmap() const = 0;
38
39 /// Map the tensor data for access. Must be paired with call to Unmap().
40 /// \param blocking hint to block the calling thread until all other accesses are complete. (backend dependent)
41 /// \return pointer to the first element of the mapped data.
42 void* Map(bool blocking=true)
43 {
44 return const_cast<void*>(static_cast<const ITensorHandle*>(this)->Map(blocking));
45 }
46
47 /// Unmap the tensor data that was previously mapped with call to Map().
48 void Unmap()
49 {
50 return static_cast<const ITensorHandle*>(this)->Unmap();
51 }
52
53 /// Get the strides for each dimension ordered from largest to smallest where
54 /// the smallest value is the same as the size of a single element in the tensor.
55 /// \return a TensorShape filled with the strides for each dimension
56 virtual TensorShape GetStrides() const = 0;
57
David Beck09e2f272018-10-30 11:38:41 +000058 /// Get the number of elements for each dimension ordered from slowest iterating dimension
telsoa01c577f2c2018-08-31 09:22:23 +010059 /// to fastest iterating dimension.
60 /// \return a TensorShape filled with the number of elements for each dimension.
61 virtual TensorShape GetShape() const = 0;
David Beck09e2f272018-10-30 11:38:41 +000062
63 // Testing support to be able to verify and set tensor data content
64 virtual void CopyOutTo(void* memory) const = 0;
65 virtual void CopyInFrom(const void* memory) = 0;
Derek Lambertif674aa02019-08-01 15:56:25 +010066
67 /// Get flags describing supported import sources.
68 virtual unsigned int GetImportFlags() const { return 0; }
69
70 /// Import externally allocated memory
71 /// \param memory base address of the memory being imported.
72 /// \param source source of the allocation for the memory being imported.
73 /// \return true on success or false on failure
74 virtual bool Import(void* memory, MemorySource source) { return false; };
telsoa014fcda012018-03-09 14:13:49 +000075};
76
77}