/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/detail/ExecutionHelpers.h"

#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/GraphManager.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/backends/BackendRegistry.h"

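// Helper routines used to validate, configure, allocate and execute a Graph.
// They are typically driven by GraphManager (see GraphManager.h): validation,
// configuration and allocation run once when the graph is finalized, while the
// accessor and task calls run on every execution.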
namespace arm_compute
{
namespace graph
{
namespace detail
{
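// Asks each node's assigned backend to validate the node's configuration and
// aborts with the backend's error description if validation fails.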
void validate_all_nodes(Graph &g)
{
    auto &nodes = g.nodes();

    // Validate each node against its assigned backend
    for(auto &node : nodes)
    {
        if(node != nullptr)
        {
            Target                    assigned_target = node->assigned_target();
            backends::IDeviceBackend &backend         = backends::BackendRegistry::get().get_backend(assigned_target);
            Status                    status          = backend.validate_node(*node);
            ARM_COMPUTE_ERROR_ON_MSG(!bool(status), status.error_description().c_str());
        }
    }
}

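// Creates a backend tensor handle for every graph tensor that does not have one
// yet, using the backend that matches the tensor descriptor's target.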
void configure_all_tensors(Graph &g)
{
    auto &tensors = g.tensors();

    for(auto &tensor : tensors)
    {
        if(tensor && tensor->handle() == nullptr)
        {
            Target                         target  = tensor->desc().target;
            backends::IDeviceBackend      &backend = backends::BackendRegistry::get().get_backend(target);
            std::unique_ptr<ITensorHandle> handle  = backend.create_tensor(*tensor);
            ARM_COMPUTE_ERROR_ON_MSG(!handle, "Couldn't create backend handle!");
            tensor->set_handle(std::move(handle));
        }
    }
}

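// Allocates the backing memory of every input tensor of the given node that is
// bound to at least one edge; the tensor handle must already have been configured.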
void allocate_all_input_tensors(INode &node)
{
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        Tensor *tensor = node.input(i);
        if(tensor != nullptr && !tensor->bound_edges().empty())
        {
            ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
            tensor->handle()->allocate();
        }
    }
}

void allocate_all_output_tensors(INode &node)
{
    for(unsigned int i = 0; i < node.num_outputs(); ++i)
    {
        Tensor *tensor = node.output(i);
        if(tensor != nullptr && !tensor->bound_edges().empty())
        {
            ARM_COMPUTE_ERROR_ON_MSG(!tensor->handle(), "Tensor handle is not configured!");
            tensor->handle()->allocate();
        }
    }
}

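// Eagerly allocates the tensors attached to Const, Input and Output nodes:
// producers (Const/Input) allocate their outputs, Output nodes allocate their inputs.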
void allocate_const_tensors(Graph &g)
{
    for(auto &node : g.nodes())
    {
        if(node != nullptr)
        {
            switch(node->type())
            {
                case NodeType::Const:
                case NodeType::Input:
                    allocate_all_output_tensors(*node);
                    break;
                case NodeType::Output:
                    allocate_all_input_tensors(*node);
                    break;
                default:
                    break;
            }
        }
    }
}

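// Allocates the remaining (intermediate) tensors: only tensors that are bound to an
// edge, have a configured handle, are still resizable and are marked as used.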
void allocate_all_tensors(Graph &g)
{
    auto &tensors = g.tensors();

    for(auto &tensor : tensors)
    {
        if(tensor && !tensor->bound_edges().empty() && tensor->handle() != nullptr && tensor->handle()->tensor().info()->is_resizable() && tensor->handle()->tensor().is_used())
        {
            tensor->handle()->allocate();
        }
    }
}

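// Builds the ExecutionWorkload: walks the nodes in the given order, lets each
// backend create the function for its node, and records the graph's Input and
// Output tensors so accessors can be called around each run.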
ExecutionWorkload configure_all_nodes(Graph &g, GraphContext &ctx, const std::vector<NodeID> &node_order)
{
    ExecutionWorkload workload;
    workload.graph = &g;
    workload.ctx   = &ctx;

    // Reserve memory for tasks
    workload.tasks.reserve(node_order.size());

    // Create tasks
    for(auto &node_id : node_order)
    {
        auto node = g.node(node_id);
        if(node != nullptr)
        {
            Target                     assigned_target = node->assigned_target();
            backends::IDeviceBackend  &backend         = backends::BackendRegistry::get().get_backend(assigned_target);
            std::unique_ptr<IFunction> func            = backend.configure_node(*node, ctx);
            if(func != nullptr)
            {
                workload.tasks.emplace_back(ExecutionTask(std::move(func), node));
            }
        }
    }

    // Add inputs and outputs
    for(auto &node : g.nodes())
    {
        if(node != nullptr && node->type() == NodeType::Input)
        {
            workload.inputs.push_back(node->output(0));
        }
        else if(node != nullptr && node->type() == NodeType::Output)
        {
            workload.outputs.push_back(node->input(0));
        }
    }

    return workload;
}

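// Releases the backing memory of every tensor whose handle reports it is no longer used.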
void release_unused_tensors(Graph &g)
{
    for(auto &tensor : g.tensors())
    {
        if(tensor != nullptr && tensor->handle() != nullptr)
        {
            tensor->handle()->release_if_unused();
        }
    }
}

void call_tensor_accessor(Tensor *tensor)
{
    ARM_COMPUTE_ERROR_ON(!tensor);
    tensor->call_accessor();
}

void call_all_const_node_accessors(Graph &g)
{
    auto &nodes = g.nodes();

    for(auto &node : nodes)
    {
        if(node != nullptr && node->type() == NodeType::Const)
        {
            call_tensor_accessor(node->output(0));
        }
    }
}

bool call_all_input_node_accessors(ExecutionWorkload &workload)
{
    return !std::any_of(std::begin(workload.inputs), std::end(workload.inputs), [](Tensor * input_tensor)
    {
        return (input_tensor == nullptr) || !input_tensor->call_accessor();
    });
}

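// Runs each task's prepare() stage (e.g. one-off weight transformations) and
// releases tensors that are no longer needed after each step.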
void prepare_all_tasks(ExecutionWorkload &workload)
{
    ARM_COMPUTE_ERROR_ON(workload.graph == nullptr);
    for(auto &task : workload.tasks)
    {
        task.prepare();
        release_unused_tensors(*workload.graph);
    }
}

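// Executes all tasks in order, holding the cross-memory-group transition buffers
// for the duration of the run.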
void call_all_tasks(ExecutionWorkload &workload)
{
    ARM_COMPUTE_ERROR_ON(workload.ctx == nullptr);

    // Acquire memory for the transition buffers
    for(auto &mm_ctx : workload.ctx->memory_managers())
    {
        if(mm_ctx.second.cross_group != nullptr)
        {
            mm_ctx.second.cross_group->acquire();
        }
    }

    // Execute tasks
    for(auto &task : workload.tasks)
    {
        task();
    }

    // Release memory for the transition buffers
    for(auto &mm_ctx : workload.ctx->memory_managers())
    {
        if(mm_ctx.second.cross_group != nullptr)
        {
            mm_ctx.second.cross_group->release();
        }
    }
}

bool call_all_output_node_accessors(ExecutionWorkload &workload)
{
    bool is_valid = true;
    std::for_each(std::begin(workload.outputs), std::end(workload.outputs), [&](Tensor * output_tensor)
    {
        bool valid_output = (output_tensor != nullptr) && output_tensor->call_accessor();
        is_valid          = is_valid && valid_output;
    });

    return is_valid;
}
} // namespace detail
} // namespace graph
} // namespace arm_compute