/*
 * Copyright (c) 2018-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/utils/misc/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
// Address rule DR-9R5 (1579. Return by converting move constructor)
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
#define RETURN_UNIQUE_PTR(x) (x)
#else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
#define RETURN_UNIQUE_PTR(x) (std::move(x))
#endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
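// Rationale for RETURN_UNIQUE_PTR: before the resolution of CWG 1579 ("Return by
// converting move constructor") was implemented (GCC < 5), returning a local
// std::unique_ptr<Derived> as a std::unique_ptr<IFunction> needed an explicit
// std::move. Newer compilers perform that conversion from the implicit move
// automatically, so the macro expands to a plain return there and avoids
// redundant/pessimizing-move warnings.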

/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}
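// Illustrative use only, with a hypothetical target-description struct (real backends
// provide their own equivalents):
//
//   struct MyTargetInfo
//   {
//       using TensorType = arm_compute::Tensor;
//       static constexpr Target TargetType = Target::NEON;
//   };
//
//   arm_compute::Tensor *in = get_backing_tensor<MyTargetInfo>(node.input(0));
//
// The helper returns nullptr for unconnected edges, so callers must check the result
// before dereferencing it.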

template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = support::cpp14::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
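// Illustrative sketch only (names used as an example, not prescribed by this header):
// a backend function factory would typically dispatch to this helper with its own
// types, e.g.
//
//   case NodeType::ActivationLayer:
//       return detail::create_activation_layer<CLActivationLayer, CLTargetInfo>(
//           *polymorphic_downcast<ActivationLayerNode *>(node));
//
// where CLActivationLayer is the backend runtime function and CLTargetInfo describes
// the backend's tensor type and target enum value.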

/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      epsilon   = node.epsilon();
    const ActivationLayerInfo        fused_act = node.fused_activation();

    // Create and configure function
    auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend concatenate layer function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
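// Note on the axis handling above: node.concatenation_axis() is a layout-agnostic
// DataLayoutDimension, and get_dimension_idx() resolves it against the output's data
// layout. For example, concatenating along the channel dimension typically maps to
// tensor dimension 0 for NHWC and dimension 2 for NCHW (dimension 0 being the
// innermost/fastest-changing dimension of an arm_compute tensor shape).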

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

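    // For quantized asymmetric inputs the bias tensor is forced to S32 below: quantized
    // convolutions accumulate in 32-bit integers, so the bias is added to the int32
    // accumulator before the result is requantized.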
    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
            std::string("WinogradConvolutionLayer"), mm,
            input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
            std::string("DirectConvolutionLayer"),
            input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
            std::string("GEMMConvolutionLayer"), mm,
            input, weights, biases, output, conv_info,
            WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
            std::string("GenericConvolutionLayer"), mm,
            input, weights, biases, output, conv_info,
            WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}
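// Illustrative sketch only: ConvolutionLayerFunctions is expected to be a small traits
// struct that names one backend function per convolution method, e.g. (CL names used
// purely as an example, not prescribed by this header)
//
//   struct CLConvolutionLayerFunctions
//   {
//       using GenericConvolutionLayer  = CLConvolutionLayer;
//       using GEMMConvolutionLayer     = CLGEMMConvolutionLayer;
//       using DirectConvolutionLayer   = CLDirectConvolutionLayer;
//       using WinogradConvolutionLayer = CLWinogradConvolutionLayer;
//   };
//
// The creator above selects one of these aliases based on node.convolution_method().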

/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
        std::string(), mm,
        input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend depth-wise convolution layer function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
        std::string("DepthwiseConvolutionLayer"),
        input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType     *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType     *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType     *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType     *output0     = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType     *output1     = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType     *output2     = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType     *output3     = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo  detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
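// Note: the four outputs above follow the usual TFLite-style detection post-process
// layout (decoded box coordinates, class ids, confidence scores and the number of
// valid detections, in that order); see DetectionPostProcessLayerInfo for the exact
// contract.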

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    const ActivationLayerInfo        act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
            std::string("ArithmeticAddition"),
            input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
            std::string("ArithmeticSubtraction"),
            input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
            std::string("PixelWiseMultiplication"),
            input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
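// Illustrative sketch only: EltwiseFunctions is expected to provide one alias per
// supported binary operation, e.g. (NEON names used purely as an example)
//
//   struct NEEltwiseFunctions
//   {
//       using Addition       = NEArithmeticAddition;
//       using Subtraction    = NEArithmeticSubtraction;
//       using Multiplication = NEPixelWiseMultiplication;
//   };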

/** Create a backend unary element-wise operation layer function
 *
 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend unary element-wise operation layer function
 */
template <typename UnaryEltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const UnaryEltwiseOperation      eltwise_op = node.eltwise_descriptor().op;

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
            std::string("Exp"),
            input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
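// Note: unlike most creators above, the fully connected creator pulls both a memory
// manager and a weights manager out of the GraphContext, so the backend function can
// recycle intermediate buffers and, where the backend supports it, reuse transformed
// weights instead of re-computing them on every run.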

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
1091
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001092/** Create a backend pad layer function
1093 *
1094 * @tparam PadLayerFunction Backend pad function
1095 * @tparam TargetInfo Target-specific information
1096 *
1097 * @param[in] node Node to create the backend function for
1098 *
1099 * @return Backend pad layer function
1100 */
1101template <typename PadLayerFunction, typename TargetInfo>
1102std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1103{
1104 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1105
1106 // Extract IO and info
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001107 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1108 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1109 const PaddingList &padding = node.padding();
1110 const PixelValue pad_value = node.pad_value();
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001111 ARM_COMPUTE_ERROR_ON(input == nullptr);
1112 ARM_COMPUTE_ERROR_ON(output == nullptr);
1113
1114 // Create and configure function
1115 auto func = support::cpp14::make_unique<PadLayerFunction>();
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001116 func->configure(input, output, padding, pad_value);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001117
1118 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001119 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1120 << node.name()
1121 << " Type: " << node.type()
1122 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001123 << " Data Type: " << input->info()->data_type()
1124 << " Input shape: " << input->info()->tensor_shape()
1125 << " Output shape: " << output->info()->tensor_shape()
1126 << std::endl);
1127
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001128 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001129}
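
/* The PaddingList handed to the pad function above is a list of
 * (before, after) element counts, one pair per tensor dimension starting from
 * dimension 0. An illustrative example (values chosen arbitrarily):
 *
 *   const PaddingList padding = { { 1, 1 },   // dimension 0: 1 element before and after
 *                                 { 2, 0 } }; // dimension 1: 2 elements before, none after
 *   const PixelValue  pad_value{};            // default-constructed, i.e. zero fill
 */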

/** Create a backend permute layer function
 *
 * @tparam PermuteLayerFunction Backend permute function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend permute layer function
 */
template <typename PermuteLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const PermutationVector &perm           = node.permutation_vector();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PermuteLayerFunction>();
    func->configure(input, output, perm);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Permutation vector: " << perm
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
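
/* A PermutationVector lists, for each destination dimension, the source
 * dimension it is taken from. As an illustrative sketch (assuming the library's
 * usual (W, H, C, N) dimension ordering for NCHW tensors), the permutation
 * commonly used to rearrange NCHW data into NHWC would be:
 *
 *   const PermutationVector nchw_to_nhwc(2U, 0U, 1U); // dst dim0 <- src dim2 (C), etc.
 */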

/** Create a backend pooling layer function
 *
 * @tparam PoolingLayerFunction Backend pooling function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pooling layer function
 */
template <typename PoolingLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const PoolingLayerInfo pool_info        = node.pooling_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PoolingLayerFunction>();
    func->configure(input, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Pooling info: " << pool_info.pool_type
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
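
/* Usage sketch (illustrative only, not part of this header's API): a backend's
 * function factory typically downcasts the generic INode to the concrete node
 * type and forwards it to the matching helper, supplying its own function class
 * and TargetInfo traits. Every name prefixed with "Example" is a placeholder:
 *
 *   std::unique_ptr<IFunction> create(INode &node)
 *   {
 *       switch(node.type())
 *       {
 *           case NodeType::PoolingLayer:
 *               return create_pooling_layer<ExamplePoolingLayer, ExampleTargetInfo>(
 *                   *arm_compute::utils::cast::polymorphic_downcast<PoolingLayerNode *>(&node));
 *           default:
 *               return nullptr;
 *       }
 *   }
 */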

/** Create a backend PRelu layer function
 *
 * @tparam PReluFunction Backend PRelu function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend PRelu layer function
 */
template <typename PReluFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *alpha  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PReluFunction>();
    func->configure(input, alpha, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend print layer function
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend print layer function
 */
template <typename TargetInfo>
std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_UNUSED(input);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << std::endl);

    return nullptr;
}

/** Create a backend priorbox layer function
 *
 * @tparam PriorBoxLayerFunction Backend priorbox function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend priorbox layer function
 */
template <typename PriorBoxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const PriorBoxLayerInfo prior_info      = node.priorbox_info();
    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
    func->configure(input0, input1, output, prior_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " PriorBoxLayer info: " << prior_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend quantization layer function
 *
 * @tparam QuantizationLayerFunction Backend quantization function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend quantization layer function
 */
template <typename QuantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend reshape layer function
 *
 * @tparam ReshapeLayerFunction Backend reshape function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reshape layer function
 */
template <typename ReshapeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend resize layer function
 *
 * @tparam ResizeLayerFunction Backend resize function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend resize layer function
 */
template <typename ResizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    const InterpolationPolicy policy = node.policy();

    // Create and configure function
    auto func = support::cpp14::make_unique<ResizeLayerFunction>();
    func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT });

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Interpolation: " << policy
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend ROI align layer function
 *
 * @tparam ROIAlignLayerFunction ROI Align function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return ROI Align layer function
 */
template <typename ROIAlignLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *rois   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    ARM_COMPUTE_ERROR_ON(rois == nullptr);

    const ROIPoolingLayerInfo pool_info = node.pooling_info();

    // Create and configure function
    auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();

    func->configure(input, rois, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " ROIs shape: " << rois->info()->tensor_shape()
                               << " ROIPooling width: " << pool_info.pooled_width()
                               << " ROIPooling height: " << pool_info.pooled_height()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend slice layer function
 *
 * @tparam SliceLayerFunction Backend slice function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend slice layer function
 */
template <typename SliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<SliceLayerFunction>();
    func->configure(input, output, node.starts(), node.ends());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend softmax layer function
 *
 * @tparam SoftmaxLayerFunction Backend softmax function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend softmax layer function
 */
template <typename SoftmaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float beta                        = node.beta();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, output, beta);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
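
/* Helpers that take a GraphContext, such as the softmax one above, fetch the
 * backend's memory manager with get_memory_manager(ctx, TargetInfo::TargetType)
 * and hand it to the wrapped function so that its internal buffers can be
 * memory managed. A hypothetical call site (placeholder types, illustration only):
 *
 *   auto softmax = create_softmax_layer<ExampleSoftmaxLayer, ExampleTargetInfo>(
 *       *arm_compute::utils::cast::polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
 */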

/** Create a backend layer stack function
 *
 * @tparam StackLayerFunction Backend stack function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend stack layer function
 */
template <typename StackLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const int axis                          = node.axis();

    // Create and configure function
    auto func = support::cpp14::make_unique<StackLayerFunction>();
    func->configure(inputs, axis, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Inputs shape: " << inputs[0]->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << axis
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
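
/* Unlike the fixed-arity helpers above, the stack helper accepts a variable
 * number of inputs, which is presumably why it checks the output count directly
 * instead of calling validate_node. A hypothetical instantiation (placeholder
 * types, illustration only):
 *
 *   auto stack = create_stack_layer<ExampleStackLayer, ExampleTargetInfo>(
 *       *arm_compute::utils::cast::polymorphic_downcast<StackLayerNode *>(node));
 */
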
/** Create a backend Upsample layer function
 *
 * @tparam UpsampleLayerFunction Backend Upsample function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend Upsample layer function
 */
template <typename UpsampleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const Size2D info                           = node.info();
    const InterpolationPolicy upsampling_policy = node.upsampling_policy();
    ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
    ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
    func->configure(input, output, info, upsampling_policy);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Strides: " << info
                               << " Upsampling policy: " << upsampling_policy
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend YOLO layer function
 *
 * @tparam YOLOlayerFunction Backend YOLO function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend YOLO layer function
 */
template <typename YOLOlayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo act_info      = node.activation_info();
    const int32_t num_classes               = node.num_classes();
    ARM_COMPUTE_ERROR_ON(num_classes <= 0);
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<YOLOlayerFunction>();
    func->configure(input, output, act_info, num_classes);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " Num classes: " << num_classes
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
} // namespace detail
} // namespace backends
} // namespace graph
} // namespace arm_compute

#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */