/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph2/backends/CL/CLDeviceBackend.h"

#include "arm_compute/graph2/Graph.h"
#include "arm_compute/graph2/GraphContext.h"
#include "arm_compute/graph2/INode.h"
#include "arm_compute/graph2/Logger.h"
#include "arm_compute/graph2/Tensor.h"
#include "arm_compute/graph2/backends/BackendRegistrar.h"
#include "arm_compute/graph2/backends/CL/CLFunctionFactory.h"
#include "arm_compute/graph2/backends/CL/CLSubTensorHandle.h"
#include "arm_compute/graph2/backends/CL/CLTensorHandle.h"

#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/BlobLifetimeManager.h"
#include "arm_compute/runtime/CL/CLBufferAllocator.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/MemoryManagerOnDemand.h"
#include "arm_compute/runtime/PoolManager.h"

#include "support/ToolchainSupport.h"

#include <fstream> // std::ifstream used by file_exists()
#include <string>

namespace arm_compute
{
namespace graph2
{
namespace backends
{
namespace
{
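/** Checks whether the given file exists and can be opened for reading */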
bool file_exists(const std::string &filename)
{
    std::ifstream file(filename);
    return file.good();
}
} // namespace

/** Register CL backend */
static detail::BackendRegistrar<CLDeviceBackend> CLDeviceBackend_registrar(Target::CL);

/** Tuner export file */
static const std::string tuner_data_filename = "acl_tuner.csv";

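/** Default constructor: creates the kernel tuner and a buffer allocator bound to the default OpenCL context */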
CLDeviceBackend::CLDeviceBackend()
    : _tuner(), _allocator(cl::Context::getDefault())
{
}

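/** Destructor: exports the tuned LWS table to file if kernel tuning was enabled and produced results */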
CLDeviceBackend::~CLDeviceBackend()
{
    // TODO (geopin01) : Should not call non-exception-safe code from a destructor
    if(_tuner.tune_new_kernels() && !_tuner.lws_table().empty())
    {
        _tuner.save_to_file(tuner_data_filename);
    }
}

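/** Enables or disables tuning of new OpenCL kernels */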
void CLDeviceBackend::set_kernel_tuning(bool enable_tuning)
{
    _tuner.set_tune_new_kernels(enable_tuning);
}

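/** Initializes the backend: restores previously exported tuner data, initializes the CL scheduler and recreates the buffer allocator */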
void CLDeviceBackend::initialize_backend()
{
    // Load tuner data if available
    if(_tuner.lws_table().empty() && file_exists(tuner_data_filename))
    {
        _tuner.load_from_file(tuner_data_filename);
    }

    // Set up the OpenCL scheduler with the tuner
    CLScheduler::get().default_init(&_tuner);

    // Re-create the allocator now that the context has been initialised
    _allocator = CLBufferAllocator();
}

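/** Configures the given graph context: propagates the tuning option and registers a CL memory management context if needed */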
void CLDeviceBackend::setup_backend_context(GraphContext &ctx)
{
    // Set up the tuner from the graph context options
    set_kernel_tuning(ctx.is_tuning_enabled());

    // Set up a memory management context for the CL target if none exists yet
    if(ctx.memory_management_ctx(Target::CL) == nullptr)
    {
        MemoryManagerContext mm_ctx;
        mm_ctx.target = Target::CL;
        mm_ctx.mm     = create_memory_manager(MemoryManagerAffinity::Buffer);

        ctx.insert_memory_management_ctx(std::move(mm_ctx));
    }
}

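/** Creates a CL backend tensor handle from the given graph tensor descriptor */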
std::unique_ptr<ITensorHandle> CLDeviceBackend::create_tensor(const Tensor &tensor)
{
    // Get tensor descriptor
    const TensorDescriptor &tensor_desc = tensor.desc();
    ARM_COMPUTE_ERROR_ON(tensor_desc.target != Target::CL);

    // Create backend tensor handle
    TensorInfo info(tensor_desc.shape, 1, tensor_desc.data_type);
    auto backend_tensor_handle = support::cpp14::make_unique<CLTensorHandle>(info);

    return std::move(backend_tensor_handle);
}

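/** Creates a CL sub-tensor handle from a parent handle, or returns nullptr if no parent is given */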
std::unique_ptr<ITensorHandle> CLDeviceBackend::create_subtensor(ITensorHandle *parent, TensorShape shape, Coordinates coords)
{
    if(parent == nullptr)
    {
        return nullptr;
    }

    return support::cpp14::make_unique<CLSubTensorHandle>(parent, shape, coords);
}

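/** Creates and configures the CL function that executes the given node */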
std::unique_ptr<arm_compute::IFunction> CLDeviceBackend::configure_node(INode &node, GraphContext &ctx)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Configuring CL node with ID : " << node.id() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.assigned_target() != Target::CL);

    // Configure node
    return CLFunctionFactory::create(&node, ctx);
}

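/** Validates a node for the CL backend; currently a no-op that always returns success */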
arm_compute::Status CLDeviceBackend::validate_node(const INode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Validating CL node with ID : " << node.id() << std::endl);

    ARM_COMPUTE_UNUSED(node);

    return Status{};
}

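/** Creates a blob-based on-demand memory manager backed by the CL buffer allocator; offset affinity is not supported */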
std::shared_ptr<arm_compute::IMemoryManager> CLDeviceBackend::create_memory_manager(MemoryManagerAffinity affinity)
{
    if(affinity == MemoryManagerAffinity::Offset)
    {
        ARM_COMPUTE_LOG_GRAPH_WARNING("CL Backend does not support offset affinity memory management!");
        return nullptr;
    }

    auto lifetime_mgr = std::make_shared<BlobLifetimeManager>();
    auto pool_mgr     = std::make_shared<PoolManager>();
    auto mm           = std::make_shared<MemoryManagerOnDemand>(lifetime_mgr, pool_mgr);

    mm->set_allocator(&_allocator);

    return mm;
}
} // namespace backends
} // namespace graph2
} // namespace arm_compute