Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 1 | /* |
Sang-Hoon Park | 68dd25f | 2020-10-19 16:00:11 +0100 | [diff] [blame] | 2 | * Copyright (c) 2016-2020 Arm Limited. |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
| 24 | #include "arm_compute/runtime/CL/CLTensorAllocator.h" |
| 25 | |
| 26 | #include "arm_compute/core/Error.h" |
| 27 | #include "arm_compute/core/TensorInfo.h" |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 28 | #include "arm_compute/runtime/CL/CLRuntimeContext.h" |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 29 | #include "arm_compute/runtime/CL/CLScheduler.h" |
| 30 | |
Sang-Hoon Park | 68dd25f | 2020-10-19 16:00:11 +0100 | [diff] [blame] | 31 | #include "support/MemorySupport.h" |
| 32 | |
namespace arm_compute
{
// Sentinel buffer returned by cl_data() when the tensor has no backing memory region yet
const cl::Buffer CLTensorAllocator::_empty_buffer = cl::Buffer();
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 36 | |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 37 | namespace |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 38 | { |
Georgios Pinitas | 4c5469b | 2019-05-21 13:32:43 +0100 | [diff] [blame] | 39 | /** Helper function used to allocate the backing memory of a tensor |
| 40 | * |
| 41 | * @param[in] context OpenCL context to use |
| 42 | * @param[in] size Size of the allocation |
| 43 | * @param[in] alignment Alignment of the allocation |
| 44 | * |
| 45 | * @return A wrapped memory region |
| 46 | */ |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 47 | std::unique_ptr<ICLMemoryRegion> allocate_region(CLCoreRuntimeContext *ctx, size_t size, cl_uint alignment) |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 48 | { |
| 49 | // Try fine-grain SVM |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 50 | std::unique_ptr<ICLMemoryRegion> region = support::cpp14::make_unique<CLFineSVMMemoryRegion>(ctx, |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 51 | CL_MEM_READ_WRITE | CL_MEM_SVM_FINE_GRAIN_BUFFER, |
| 52 | size, |
| 53 | alignment); |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 54 | |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 55 | // Try coarse-grain SVM in case of failure |
| 56 | if(region != nullptr && region->ptr() == nullptr) |
| 57 | { |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 58 | region = support::cpp14::make_unique<CLCoarseSVMMemoryRegion>(ctx, CL_MEM_READ_WRITE, size, alignment); |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 59 | } |
| 60 | // Try legacy buffer memory in case of failure |
| 61 | if(region != nullptr && region->ptr() == nullptr) |
| 62 | { |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 63 | region = support::cpp14::make_unique<CLBufferMemoryRegion>(ctx, CL_MEM_ALLOC_HOST_PTR | CL_MEM_READ_WRITE, size); |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 64 | } |
| 65 | return region; |
| 66 | } |
Georgios Pinitas | 4c5469b | 2019-05-21 13:32:43 +0100 | [diff] [blame] | 67 | /** Clears quantization arrays |
| 68 | * |
| 69 | * @param[in, out] scale Quantization scale array |
| 70 | * @param[in, out] offset Quantization offset array |
| 71 | */ |
| 72 | void clear_quantization_arrays(CLFloatArray &scale, CLInt32Array &offset) |
| 73 | { |
| 74 | // Clear arrays |
| 75 | scale = CLFloatArray(); |
| 76 | offset = CLInt32Array(); |
| 77 | } |
/** Helper function used to create and fill the quantization data arrays on the device
 *
 * @param[in, out] scale    Quantization scale array (reallocated and filled from qinfo)
 * @param[in, out] offset   Quantization offset array (only filled when qinfo has offsets)
 * @param[in]      qinfo    Quantization info supplying host-side scale/offset vectors
 * @param[in]      pad_size Pad size to use in case array needs to be padded for computation purposes
 */
void populate_quantization_info(CLFloatArray &scale, CLInt32Array &offset, const QuantizationInfo &qinfo, size_t pad_size)
{
    // Drop any previous arrays before reallocating
    clear_quantization_arrays(scale, offset);

    // Create scale array
    const std::vector<float> &qscale       = qinfo.scale();
    const size_t              num_elements = qscale.size();
    const size_t              element_size = sizeof(std::remove_reference<decltype(qscale)>::type::value_type);
    // Allocate with room for padding but expose only the valid elements
    scale = CLFloatArray(num_elements + pad_size);
    scale.resize(num_elements);
    // Blocking write (CL_TRUE) so the host-side vector may be released after this call
    CLScheduler::get().queue().enqueueWriteBuffer(scale.cl_buffer(), CL_TRUE, 0, num_elements * element_size, qinfo.scale().data());

    if(!qinfo.offset().empty())
    {
        // Create offset array
        const std::vector<int32_t> &qoffset             = qinfo.offset();
        const size_t                offset_element_size = sizeof(std::remove_reference<decltype(qoffset)>::type::value_type);
        // NOTE(review): num_elements is taken from the scale vector; this assumes offset()
        // has the same number of entries as scale() — confirm for per-channel asymmetric cases.
        offset = CLInt32Array(num_elements + pad_size);
        offset.resize(num_elements);
        CLScheduler::get().queue().enqueueWriteBuffer(offset.cl_buffer(), CL_TRUE, 0, num_elements * offset_element_size, qinfo.offset().data());
    }
}
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 107 | } // namespace |
| 108 | |
// Construct an allocator bound to an optional runtime context.
// When ctx is nullptr the legacy CLScheduler singleton is used for queues/contexts.
CLTensorAllocator::CLTensorAllocator(IMemoryManageable *owner, CLRuntimeContext *ctx)
    : _ctx(ctx), _owner(owner), _associated_memory_group(nullptr), _memory(), _mapping(nullptr), _scale(), _offset()
{
}
| 113 | |
Georgios Pinitas | 4c5469b | 2019-05-21 13:32:43 +0100 | [diff] [blame] | 114 | CLQuantization CLTensorAllocator::quantization() const |
| 115 | { |
| 116 | return { &_scale, &_offset }; |
| 117 | } |
| 118 | |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 119 | uint8_t *CLTensorAllocator::data() |
| 120 | { |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 121 | return _mapping; |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 122 | } |
| 123 | |
| 124 | const cl::Buffer &CLTensorAllocator::cl_data() const |
| 125 | { |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 126 | return _memory.region() == nullptr ? _empty_buffer : _memory.cl_region()->cl_data(); |
Pablo Tello | e86a09f | 2018-01-11 15:44:48 +0000 | [diff] [blame] | 127 | } |
| 128 | |
// Allocate the tensor's backing memory (directly or via its memory group),
// populate per-channel quantization arrays if needed, and lock the tensor shape.
void CLTensorAllocator::allocate()
{
    // Allocate tensor backing memory
    if(_associated_memory_group == nullptr)
    {
        // Perform memory allocation
        if(_ctx == nullptr)
        {
            // Legacy path: build a temporary core context from the CLScheduler singleton.
            // NOTE(review): legacy_ctx is stack-local and its address is handed to the region —
            // confirm the region does not retain the pointer past this call, else it dangles.
            auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
            _memory.set_owned_region(allocate_region(&legacy_ctx, info().total_size(), 0));
        }
        else
        {
            _memory.set_owned_region(allocate_region(_ctx->core_runtime_context(), info().total_size(), 0));
        }
    }
    else
    {
        // Managed by a memory group: let the group finalize (and possibly share) the memory
        _associated_memory_group->finalize_memory(_owner, _memory, info().total_size(), alignment());
    }

    // Allocate and fill the quantization parameter arrays
    if(is_data_type_quantized_per_channel(info().data_type()))
    {
        const size_t pad_size = 0;
        populate_quantization_info(_scale, _offset, info().quantization_info(), pad_size);
    }

    // Lock allocator
    info().set_is_resizable(false);
}
| 160 | |
| 161 | void CLTensorAllocator::free() |
| 162 | { |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 163 | _mapping = nullptr; |
| 164 | _memory.set_region(nullptr); |
Georgios Pinitas | 4c5469b | 2019-05-21 13:32:43 +0100 | [diff] [blame] | 165 | clear_quantization_arrays(_scale, _offset); |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 166 | info().set_is_resizable(true); |
Georgios Pinitas | baf174e | 2017-09-08 19:47:30 +0100 | [diff] [blame] | 167 | } |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 168 | |
// Bind an externally-owned OpenCL buffer as this tensor's backing memory.
// Returns an error status if the buffer is null, too small, from a different
// OpenCL context, or if the tensor is managed by a memory group.
Status CLTensorAllocator::import_memory(cl::Buffer buffer)
{
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.get() == nullptr);
    // Buffer must be large enough to hold the whole tensor
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_SIZE>() < info().total_size());
    // Buffer must come from the same context the scheduler uses
    ARM_COMPUTE_RETURN_ERROR_ON(buffer.getInfo<CL_MEM_CONTEXT>().get() != CLScheduler::get().context().get());
    // Importing is not supported for group-managed tensors
    ARM_COMPUTE_RETURN_ERROR_ON(_associated_memory_group != nullptr);

    if(_ctx == nullptr)
    {
        // Legacy path: build a temporary core context from the CLScheduler singleton.
        // NOTE(review): legacy_ctx is stack-local and its address is handed to the region —
        // confirm CLBufferMemoryRegion does not retain it beyond construction, else it dangles.
        auto legacy_ctx = CLCoreRuntimeContext(nullptr, CLScheduler::get().context(), CLScheduler::get().queue());
        _memory.set_owned_region(support::cpp14::make_unique<CLBufferMemoryRegion>(buffer, &legacy_ctx));
    }
    else
    {
        _memory.set_owned_region(support::cpp14::make_unique<CLBufferMemoryRegion>(buffer, _ctx->core_runtime_context()));
    }

    // Lock the tensor metadata now that memory is bound
    info().set_is_resizable(false);
    return Status{};
}
| 189 | |
// Associate a memory group that will manage this tensor's allocation.
// Must be called before any memory is allocated; re-association to a
// different group is not allowed.
void CLTensorAllocator::set_associated_memory_group(IMemoryGroup *associated_memory_group)
{
    ARM_COMPUTE_ERROR_ON(associated_memory_group == nullptr);
    // Only the same group may be set twice
    ARM_COMPUTE_ERROR_ON(_associated_memory_group != nullptr && _associated_memory_group != associated_memory_group);
    // Cannot hand over a tensor whose memory is already allocated
    ARM_COMPUTE_ERROR_ON(_memory.region() != nullptr && _memory.cl_region()->cl_data().get() != nullptr);

    _associated_memory_group = associated_memory_group;
}
| 198 | |
| 199 | uint8_t *CLTensorAllocator::lock() |
| 200 | { |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 201 | if(_ctx) |
| 202 | { |
| 203 | return map(_ctx->gpu_scheduler()->queue(), true); |
| 204 | } |
| 205 | else |
| 206 | { |
| 207 | return map(CLScheduler::get().queue(), true); |
| 208 | } |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 209 | } |
| 210 | |
| 211 | void CLTensorAllocator::unlock() |
| 212 | { |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 213 | ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr); |
Pablo Tello | db8485a | 2019-09-24 11:03:47 +0100 | [diff] [blame] | 214 | if(_ctx) |
| 215 | { |
| 216 | unmap(_ctx->gpu_scheduler()->queue(), reinterpret_cast<uint8_t *>(_memory.region()->buffer())); |
| 217 | } |
| 218 | else |
| 219 | { |
| 220 | //Legacy singleton api |
| 221 | unmap(CLScheduler::get().queue(), reinterpret_cast<uint8_t *>(_memory.region()->buffer())); |
| 222 | } |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 223 | } |
| 224 | |
| 225 | uint8_t *CLTensorAllocator::map(cl::CommandQueue &q, bool blocking) |
| 226 | { |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 227 | ARM_COMPUTE_ERROR_ON(_mapping != nullptr); |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 228 | ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr); |
| 229 | ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() != nullptr); |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 230 | |
| 231 | _mapping = reinterpret_cast<uint8_t *>(_memory.cl_region()->map(q, blocking)); |
| 232 | return _mapping; |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 233 | } |
| 234 | |
| 235 | void CLTensorAllocator::unmap(cl::CommandQueue &q, uint8_t *mapping) |
| 236 | { |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 237 | ARM_COMPUTE_ERROR_ON(_mapping == nullptr); |
| 238 | ARM_COMPUTE_ERROR_ON(_mapping != mapping); |
Georgios Pinitas | 99d4095 | 2018-04-23 16:26:46 +0100 | [diff] [blame] | 239 | ARM_COMPUTE_ERROR_ON(_memory.region() == nullptr); |
| 240 | ARM_COMPUTE_ERROR_ON(_memory.region()->buffer() == nullptr); |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 241 | ARM_COMPUTE_UNUSED(mapping); |
| 242 | |
| 243 | _memory.cl_region()->unmap(q); |
| 244 | _mapping = nullptr; |
Anthony Barbier | 6ff3b19 | 2017-09-04 18:44:23 +0100 | [diff] [blame] | 245 | } |
Georgios Pinitas | df31036 | 2018-11-14 13:16:56 +0000 | [diff] [blame] | 246 | } // namespace arm_compute |