blob: f85b8ae777ecf17711b7a61748edcce3f1e831c9 [file] [log] [blame]
/*
 * Copyright (c) 2016-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#include "arm_compute/runtime/CL/CLTensorAllocator.h"
25
26#include "arm_compute/core/Error.h"
27#include "arm_compute/core/TensorInfo.h"
Pablo Tellodb8485a2019-09-24 11:03:47 +010028#include "arm_compute/runtime/CL/CLRuntimeContext.h"
Anthony Barbier6ff3b192017-09-04 18:44:23 +010029#include "arm_compute/runtime/CL/CLScheduler.h"
30
namespace arm_compute
{
// Shared empty buffer handle returned by cl_data() when the tensor has no backing memory yet
const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
namespace
{
/** Global user-defined allocator that can be used for all internal allocations of a CLTensor */
static IAllocator *static_global_cl_allocator = nullptr;
38
Georgios Pinitas4c5469b2019-05-21 13:32:43 +010039/** Helper function used to allocate the backing memory of a tensor
40 *
Georgios Pinitas4c5469b2019-05-21 13:32:43 +010041 * @param[in] size Size of the allocation
42 * @param[in] alignment Alignment of the allocation
43 *
44 * @return A wrapped memory region
45 */
Georgios Pinitas2cd5b312021-05-04 21:39:57 +010046std::unique_ptr<ICLMemoryRegion> allocate_region(size_t size, cl_uint alignment)
Georgios Pinitas99d40952018-04-23 16:26:46 +010047{
48 // Try fine-grain SVM
Georgios Pinitas2cd5b312021-05-04 21:39:57 +010049 std::unique_ptr<ICLMemoryRegion> region = std::make_unique<CLFineSVMMemoryRegion>(CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER,
Georgios Pinitas40f51a62020-11-21 03:04:18 +000050 size,
51 alignment);
Anthony Barbier6ff3b192017-09-04 18:44:23 +010052
Georgios Pinitas99d40952018-04-23 16:26:46 +010053 // Try coarse-grain SVM in case of failure
54 if(region != nullptr && region->ptr() == nullptr)
55 {
Georgios Pinitas2cd5b312021-05-04 21:39:57 +010056 region = std::make_unique<CLCoarseSVMMemoryRegion>(CL_MEM_READ_WRITE, size, alignment);
Georgios Pinitas99d40952018-04-23 16:26:46 +010057 }
58 // Try legacy buffer memory in case of failure
59 if(region != nullptr && region->ptr() == nullptr)
60 {
Georgios Pinitas2cd5b312021-05-04 21:39:57 +010061 region = std::make_unique<CLBufferMemoryRegion>(CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size);
Georgios Pinitas99d40952018-04-23 16:26:46 +010062 }
63 return region;
64}
Georgios Pinitas4c5469b2019-05-21 13:32:43 +010065/** Clears quantization arrays
66 *
67 * @param[in, out] scale Quantization scale array
68 * @param[in, out] offset Quantization offset array
69 */
70void clear_quantization_arrays(CLFloatArray &scale, CLInt32Array &offset)
71{
72 // Clear arrays
73 scale = CLFloatArray();
74 offset = CLInt32Array();
75}
76/** Helper function used to create quantization data arrays
77 *
78 * @param[in, out] scale Quantization scale array
79 * @param[in, out] offset Quantization offset array
80 * @param[in] qinfo Quantization info
81 * @param[in] pad_size Pad size to use in case array needs to be padded for computation purposes
Georgios Pinitas4c5469b2019-05-21 13:32:43 +010082 */
83void populate_quantization_info(CLFloatArray &scale, CLInt32Array &offset, const QuantizationInfo &qinfo, size_t pad_size)
84{
85 clear_quantization_arrays(scale, offset);
86
87 // Create scale array
Georgios Pinitas3d13af82019-06-04 13:04:16 +010088 const std::vector<float> &qscale = qinfo.scale();
89 const size_t num_elements = qscale.size();
90 const size_t element_size = sizeof(std::remove_reference<decltype(qscale)>::type::value_type);
91 scale = CLFloatArray(num_elements + pad_size);
Georgios Pinitas4c5469b2019-05-21 13:32:43 +010092 scale.resize(num_elements);
Georgios Pinitas3d13af82019-06-04 13:04:16 +010093 CLScheduler::get().queue().enqueueWriteBuffer(scale.cl_buffer(), CL_TRUE, 0, num_elements * element_size, qinfo.scale().data());
Michalis Spyrou3f632f32019-08-22 16:52:00 +010094
95 if(!qinfo.offset().empty())
96 {
97 // Create offset array
98 const std::vector<int32_t> &qoffset = qinfo.offset();
99 const size_t offset_element_size = sizeof(std::remove_reference<decltype(qoffset)>::type::value_type);
100 offset = CLInt32Array(num_elements + pad_size);
101 offset.resize(num_elements);
102 CLScheduler::get().queue().enqueueWriteBuffer(offset.cl_buffer(), CL_TRUE, 0, num_elements * offset_element_size, qinfo.offset().data());
103 }
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100104}
Georgios Pinitas99d40952018-04-23 16:26:46 +0100105} // namespace
106
/** Constructor
 *
 * @param[in] owner Memory manageable owner of the allocator (may be nullptr)
 * @param[in] ctx   Runtime context; when nullptr the legacy CLScheduler singleton
 *                  is used for queue access (see lock()/unlock())
 */
CLTensorAllocator::CLTensorAllocator(IMemoryManageable *owner, CLRuntimeContext *ctx)
    : _ctx(ctx), _owner(owner), _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _scale(), _offset()
{
}
111
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100112CLQuantization CLTensorAllocator::quantization() const
113{
114 return { &_scale, &_offset };
115}
116
/** Returns the current host mapping of the tensor, or nullptr when the tensor is not mapped */
uint8_t *CLTensorAllocator::data()
{
    return _mapping;
}
121
122const cl::Buffer &CLTensorAllocator::cl_data() const
123{
Georgios Pinitasdf310362018-11-14 13:16:56 +0000124 return _memory.region() == nullptr ? _empty_buffer : _memory.cl_region()->cl_data();
Pablo Telloe86a09f2018-01-11 15:44:48 +0000125}
126
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100127void CLTensorAllocator::allocate()
128{
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100129 // Allocate tensor backing memory
Georgios Pinitasbaf174e2017-09-08 19:47:30 +0100130 if(_associated_memory_group == nullptr)
131 {
Georgios Pinitasb785dd42019-09-19 12:09:32 +0100132 // Perform memory allocation
Georgios Pinitas035004e2021-04-13 19:44:17 +0100133 if(static_global_cl_allocator != nullptr)
134 {
135 _memory.set_owned_region(static_global_cl_allocator->make_region(info().total_size(), 0));
136 }
Pablo Tellodb8485a2019-09-24 11:03:47 +0100137 else
138 {
Georgios Pinitas2cd5b312021-05-04 21:39:57 +0100139 _memory.set_owned_region(allocate_region(info().total_size(), 0));
Pablo Tellodb8485a2019-09-24 11:03:47 +0100140 }
Georgios Pinitasbaf174e2017-09-08 19:47:30 +0100141 }
142 else
143 {
Georgios Pinitas035004e2021-04-13 19:44:17 +0100144 // Finalize memory management instead
Georgios Pinitas26014cf2019-09-09 19:00:57 +0100145 _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
Georgios Pinitasbaf174e2017-09-08 19:47:30 +0100146 }
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100147
148 // Allocate and fill the quantization parameter arrays
Michalis Spyrouc8530212019-08-22 11:44:04 +0100149 if(is_data_type_quantized_per_channel(info().data_type()))
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100150 {
151 const size_t pad_size = 0;
152 populate_quantization_info(_scale, _offset, info().quantization_info(), pad_size);
153 }
154
155 // Lock allocator
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100156 info().set_is_resizable(false);
157}
158
159void CLTensorAllocator::free()
160{
Georgios Pinitasdf310362018-11-14 13:16:56 +0000161 _mapping = nullptr;
162 _memory.set_region(nullptr);
Georgios Pinitas4c5469b2019-05-21 13:32:43 +0100163 clear_quantization_arrays(_scale, _offset);
Georgios Pinitasdf310362018-11-14 13:16:56 +0000164 info().set_is_resizable(true);
Georgios Pinitasbaf174e2017-09-08 19:47:30 +0100165}
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100166
/** Import an existing OpenCL buffer as the tensor's backing memory.
 *
 * Fails (returns an error Status) when:
 * - the buffer handle is null,
 * - the buffer is smaller than the tensor's total size,
 * - the buffer belongs to a different context than the CLScheduler's,
 * - the tensor is managed by a memory group.
 */
Status CLTensorAllocator::import_memory(cl::Buffer buffer)
{
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.get() == nullptr);
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_SIZE>() < info().total_size());
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get());
    ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);

    // Wrap the imported handle in a buffer region owned by this allocator
    _memory.set_owned_region(std::make_unique<CLBufferMemoryRegion>(buffer));

    // Importing fixes the tensor's size/shape
    info().set_is_resizable(false);
    return Status{};
}
179
/** Associate this allocator with a memory group that will manage its backing memory.
 *
 * Must be called before any memory is allocated: errors if a different group is
 * already set or if a backing buffer already exists.
 */
void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
{
    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
    // Re-setting the same group is allowed; switching groups is not
    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
    ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr);

    _associated_memory_group = associated_memory_group;
}
188
/** Install a user-defined allocator used for subsequent standalone CLTensor allocations.
 *
 * Pass nullptr to restore the default allocation strategy. Only consulted when the
 * tensor is not managed by a memory group (see allocate()).
 */
void CLTensorAllocator::set_global_allocator(IAllocator *allocator)
{
    static_global_cl_allocator = allocator;
}
193
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100194uint8_t *CLTensorAllocator::lock()
195{
Pablo Tellodb8485a2019-09-24 11:03:47 +0100196 if(_ctx)
197 {
198 return map(_ctx->gpu_scheduler()->queue(), true);
199 }
200 else
201 {
202 return map(CLScheduler::get().queue(), true);
203 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100204}
205
206void CLTensorAllocator::unlock()
207{
Georgios Pinitas99d40952018-04-23 16:26:46 +0100208 ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
Pablo Tellodb8485a2019-09-24 11:03:47 +0100209 if(_ctx)
210 {
211 unmap(_ctx->gpu_scheduler()->queue(), reinterpret_cast<uint8_t *>(_memory.region()->buffer()));
212 }
213 else
214 {
215 //Legacy singleton api
216 unmap(CLScheduler::get().queue(), reinterpret_cast<uint8_t *>(_memory.region()->buffer()));
217 }
Anthony Barbier6ff3b192017-09-04 18:44:23 +0100218}
219
/** Map the backing region into host memory on queue @p q.
 *
 * @param[in] q        Command queue to perform the mapping on
 * @param[in] blocking True to block until the mapping is complete
 *
 * @return Host pointer to the mapped memory (also cached in _mapping)
 */
uint8_t *CLTensorAllocator::map(cl::CommandQueue &q, bool blocking)
{
    // Double-mapping is a programming error: neither our cached pointer nor the
    // region's host pointer may already be set
    ARM_COMPUTE_ERROR_ON(_mapping != nullptr);
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr);

    _mapping = reinterpret_cast<uint8_t *>(_memory.cl_region()->map(q, blocking));
    return _mapping;
}
229
/** Unmap the backing region from host memory on queue @p q.
 *
 * @param[in] q       Command queue to perform the unmapping on
 * @param[in] mapping Host pointer previously returned by map(); checked (debug builds)
 *                    against the cached mapping and otherwise unused
 */
void CLTensorAllocator::unmap(cl::CommandQueue &q, uint8_t *mapping)
{
    ARM_COMPUTE_ERROR_ON(_mapping == nullptr);
    ARM_COMPUTE_ERROR_ON(_mapping != mapping);
    ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr);
    ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() == nullptr);
    // mapping is only used by the debug assertions above
    ARM_COMPUTE_UNUSED(mapping);

    _memory.cl_region()->unmap(q);
    _mapping = nullptr;
}
} // namespace arm_compute