/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/utils/misc/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
// Address rule DR-9R5 (1579. Return by converting move constructor)
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
#define RETURN_UNIQUE_PTR(x) (x)
#else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
#define RETURN_UNIQUE_PTR(x) (std::move(x))
#endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
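
// Note (illustrative, not part of the original sources): the macro above works around GCC
// releases older than 5, which do not implement C++ core issue 1579 ("return by converting
// move constructor"); on those compilers returning a local std::unique_ptr<ConcreteFunction>
// as std::unique_ptr<IFunction> needs an explicit std::move. A minimal sketch of the pattern
// used throughout this file, with SomeBackendFunction being a hypothetical placeholder:
//
//     std::unique_ptr<IFunction> make_function()
//     {
//         auto func = support::cpp14::make_unique<SomeBackendFunction>();
//         return RETURN_UNIQUE_PTR(func); // expands to (func) or std::move(func) as appropriate
//     }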

/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}
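
// Illustrative usage (not part of the original sources): TargetInfo is expected to expose a
// TensorType and a TargetType; the helper above unwraps a graph tensor into the backend tensor,
// e.g. (SomeTargetInfo is a hypothetical backend trait):
//
//     // typename SomeTargetInfo::TensorType *backing = get_backing_tensor<SomeTargetInfo>(node.input(0));
//     // backing is nullptr when the corresponding edge is not connected.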

template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = support::cpp14::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace : " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
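
// Illustrative usage (not part of the original sources): a backend function factory would
// typically instantiate this helper with its own function and target types when visiting an
// activation node, e.g. (SomeActivationLayer / SomeTargetInfo are hypothetical placeholders):
//
//     // case NodeType::ActivationLayer:
//     //     return create_activation_layer<SomeActivationLayer, SomeTargetInfo>(
//     //         *polymorphic_downcast<ActivationLayerNode *>(node));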

/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta   = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma  = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float               epsilon       = node.epsilon();
    const ActivationLayerInfo fused_act     = node.fused_activation();

    // Create and configure function
    auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::SrcTensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}
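
// Illustrative sketch (not part of the original sources): the ConvolutionLayerFunctions template
// argument is expected to be a trait-like collection naming the four backend implementations
// dispatched above, roughly (SomeBackendConvolutionLayerFunctions is a hypothetical name):
//
//     // struct SomeBackendConvolutionLayerFunctions
//     // {
//     //     using GenericConvolutionLayer  = ...; // fallback when no method hint matches
//     //     using GEMMConvolutionLayer     = ...; // ConvolutionMethod::GEMM
//     //     using DirectConvolutionLayer   = ...; // ConvolutionMethod::Direct
//     //     using WinogradConvolutionLayer = ...; // ConvolutionMethod::Winograd
//     // };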

/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
                                      std::string(), mm,
                                      input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend layer depth-wise convolution function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend depth to space layer function
 *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth to space layer function
 */
template <typename DepthToSpaceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DepthToSpaceLayerFunction>();
    func->configure(input, output, node.block_shape());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Block Size: " << node.block_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << output->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType     *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType     *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType     *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType     *output0     = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType     *output1     = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType     *output2     = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType     *output3     = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo  detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    const ActivationLayerInfo        act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else if(eltwise_op == EltwiseOperation::Max)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
                                        std::string("ElementwiseMaximum"),
                                        input1, input2, output, act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
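
// Illustrative sketch (not part of the original sources): EltwiseFunctions is expected to name
// the backend implementations selected above, roughly (SomeBackendEltwiseFunctions is a
// hypothetical name):
//
//     // struct SomeBackendEltwiseFunctions
//     // {
//     //     using Addition       = ...; // EltwiseOperation::Add
//     //     using Subtraction    = ...; // EltwiseOperation::Sub
//     //     using Multiplication = ...; // EltwiseOperation::Mul
//     //     using Maximum        = ...; // EltwiseOperation::Max
//     // };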

/** Create a backend unary element-wise operation layer function
 *
 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend unary element-wise operation layer function
 */
template <typename UnaryEltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const UnaryEltwiseOperation      eltwise_op = node.eltwise_descriptor().op;

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
                                        std::string("Exp"),
                                        input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
1136
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001137/** Create a backend pad layer function
1138 *
1139 * @tparam PadLayerFunction Backend pad function
1140 * @tparam TargetInfo Target-specific information
1141 *
1142 * @param[in] node Node to create the backend function for
1143 *
1144 * @return Backend pad layer function
1145 */
1146template <typename PadLayerFunction, typename TargetInfo>
1147std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1148{
1149 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1150
1151 // Extract IO and info
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001152 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1153 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1154 const PaddingList &padding = node.padding();
1155 const PixelValue pad_value = node.pad_value();
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001156 ARM_COMPUTE_ERROR_ON(input == nullptr);
1157 ARM_COMPUTE_ERROR_ON(output == nullptr);
1158
1159 // Create and configure function
1160 auto func = support::cpp14::make_unique<PadLayerFunction>();
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001161 func->configure(input, output, padding, pad_value);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001162
1163 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001164 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1165 << node.name()
1166 << " Type: " << node.type()
1167 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001168 << " Data Type: " << input->info()->data_type()
1169 << " Input shape: " << input->info()->tensor_shape()
1170 << " Output shape: " << output->info()->tensor_shape()
1171 << std::endl);
1172
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001173 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001174}
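
// For reference (illustrative values): PaddingList is a per-dimension list of (before, after)
// element counts, given in the tensor's native dimension order. Assuming an NCHW tensor, where
// dimension 0 is the width and dimension 1 the height, one element of symmetric spatial padding
// would be described as:
//
//   const PaddingList padding = { { 1, 1 },   // dim 0 (W): 1 element before, 1 after
//                                 { 1, 1 },   // dim 1 (H): 1 element before, 1 after
//                                 { 0, 0 } }; // dim 2 (C): untouched
//   const PixelValue  pad_value;              // default-constructed, i.e. zero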
1175
Georgios Pinitas57c48242018-08-02 13:41:49 +01001176/** Create a backend permute layer function
1177 *
1178 * @tparam PermuteLayerFunction Backend permute function
1179 * @tparam TargetInfo Target-specific information
1180 *
1181 * @param[in] node Node to create the backend function for
1182 *
1183 * @return Backend permute layer function
1184 */
1185template <typename PermuteLayerFunction, typename TargetInfo>
1186std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1187{
1188 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1189
1190 // Extract IO and info
1191 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1192 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1193 const PermutationVector &perm = node.permutation_vector();
1194 ARM_COMPUTE_ERROR_ON(input == nullptr);
1195 ARM_COMPUTE_ERROR_ON(output == nullptr);
1196
1197 // Create and configure function
1198 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1199 func->configure(input, output, perm);
1200
1201 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001202 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1203 << node.name()
1204 << " Type: " << node.type()
1205 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001206 << " Data Type: " << input->info()->data_type()
1207 << " Input shape: " << input->info()->tensor_shape()
1208 << " Output shape: " << output->info()->tensor_shape()
1209 << " Permutation vector: " << perm
1210 << std::endl);
1211
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001212 return RETURN_UNIQUE_PTR(func);
Georgios Pinitas57c48242018-08-02 13:41:49 +01001213}
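
// A note on PermutationVector semantics (illustrative, following the convention used by the
// library's layout-conversion code): entry i of the vector names the source dimension that
// destination dimension i is taken from. The two common layout conversions are:
//
//   const PermutationVector nchw_to_nhwc(2U, 0U, 1U); // NCHW -> NHWC: [W, H, C] -> [C, W, H]
//   const PermutationVector nhwc_to_nchw(1U, 2U, 0U); // NHWC -> NCHW: [C, W, H] -> [W, H, C]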
1214
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001215/** Create a backend pooling layer function
1216 *
1217 * @tparam PoolingLayerFunction Backend pooling function
1218 * @tparam TargetInfo Target-specific information
1219 *
1220 * @param[in] node Node to create the backend function for
1221 *
1222 * @return Backend pooling layer function
1223 */
1224template <typename PoolingLayerFunction, typename TargetInfo>
1225std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1226{
1227 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1228
1229 // Extract IO and info
1230 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1231 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1232 const PoolingLayerInfo pool_info = node.pooling_info();
1233 ARM_COMPUTE_ERROR_ON(input == nullptr);
1234 ARM_COMPUTE_ERROR_ON(output == nullptr);
1235
1236 // Create and configure function
1237 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1238 func->configure(input, output, pool_info);
1239
1240 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001241 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1242 << node.name()
1243 << " Type: " << node.type()
1244 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001245 << " Data Type: " << input->info()->data_type()
1246 << " Input shape: " << input->info()->tensor_shape()
1247 << " Output shape: " << output->info()->tensor_shape()
Sang-Hoon Park0cb3da62020-01-15 12:39:56 +00001248 << " Pooling info: " << pool_info.pool_type
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001249 << std::endl);
1250
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001251 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001252}
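
// Illustrative only: with the 20.x PoolingLayerInfo API used above (public pool_type member,
// explicit data layout), a pooling node typically carries info built along these lines:
//
//   // 3x3 max pooling with stride 2 and no padding on an NHWC tensor
//   const PoolingLayerInfo pool_info(PoolingType::MAX, Size2D(3, 3), DataLayout::NHWC,
//                                    PadStrideInfo(2, 2, 0, 0));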
1253
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001254/** Create a backend PRelu layer function
1255 *
1256 * @tparam PReluFunction Backend PRelu function
1257 * @tparam TargetInfo Target-specific information
1258 *
1259 * @param[in] node Node to create the backend function for
1260 *
1261 * @return Backend PRelu layer function
1262 */
1263template <typename PReluFunction, typename TargetInfo>
1264std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1265{
1266 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1267
1268 // Extract IO and info
1269 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1270 typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1271 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1272 ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1273 ARM_COMPUTE_ERROR_ON(output == nullptr);
1274
1275 // Create and configure function
1276 auto func = support::cpp14::make_unique<PReluFunction>();
1277 func->configure(input, alpha, output);
1278
1279 // Log info
1280 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1281 << node.name()
1282 << " Type: " << node.type()
1283 << " Target: " << TargetInfo::TargetType
1284 << " Data Type: " << input->info()->data_type()
1285 << " Input shape: " << input->info()->tensor_shape()
1286 << " Output shape: " << output->info()->tensor_shape()
1287 << std::endl);
1288
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001289 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001290}
1291
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00001292/** Create a backend print layer function
1293 *
1294 * @tparam TargetInfo Target-specific information
1295 *
1296 * @param[in] node Node to create the backend function for
1297 *
1298 * @return Backend print layer function
1299 */
1300template <typename TargetInfo>
1301std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1302{
1303 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1304
1305 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1306 ARM_COMPUTE_ERROR_ON(input == nullptr);
1307 ARM_COMPUTE_UNUSED(input);
1308
1309 // Log info
1310 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1311 << node.name()
1312 << " Type: " << node.type()
1313 << " Target: " << TargetInfo::TargetType
1314 << " Data Type: " << input->info()->data_type()
1315 << " Input shape: " << input->info()->tensor_shape()
1316 << std::endl);
1317
1318 return nullptr;
1319}
1320
Pablo Tello32521432018-11-15 14:43:10 +00001321/** Create a backend priorbox layer function
1322 *
1323 * @tparam PriorBoxLayerFunction Backend priorbox function
1324 * @tparam TargetInfo Target-specific information
1325 *
1326 * @param[in] node Node to create the backend function for
1327 *
1328 * @return Backend priorbox layer function
1329 */
1330template <typename PriorBoxLayerFunction, typename TargetInfo>
1331std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1332{
1333 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1334
1335 // Extract IO and info
1336 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1337 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1338 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1339 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1340 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1341 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1342 ARM_COMPUTE_ERROR_ON(output == nullptr);
1343
1344 // Create and configure function
1345 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1346 func->configure(input0, input1, output, prior_info);
1347
1348 // Log info
1349 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1350 << node.name()
1351 << " Type: " << node.type()
1352 << " Target: " << TargetInfo::TargetType
1353 << " Data Type: " << input0->info()->data_type()
1354 << " Input0 shape: " << input0->info()->tensor_shape()
1355 << " Input1 shape: " << input1->info()->tensor_shape()
1356 << " Output shape: " << output->info()->tensor_shape()
1357 << " PriorBoxLayer info: " << prior_info
1358 << std::endl);
1359
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001360 return RETURN_UNIQUE_PTR(func);
Pablo Tello32521432018-11-15 14:43:10 +00001361}
1362
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001363/** Create a backend quantization layer function
1364 *
1365 * @tparam QuantizationLayerFunction Backend quantization function
1366 * @tparam TargetInfo Target-specific information
1367 *
1368 * @param[in] node Node to create the backend function for
1369 *
1370 * @return Backend quantization layer function
1371 */
1372template <typename QuantizationLayerFunction, typename TargetInfo>
1373std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1374{
1375 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1376
1377 // Extract IO and info
1378 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1379 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1380 ARM_COMPUTE_ERROR_ON(input == nullptr);
1381 ARM_COMPUTE_ERROR_ON(output == nullptr);
1382
1383 // Create and configure function
1384 auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1385 func->configure(input, output);
1386
1387 // Log info
1388 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1389 << node.name()
1390 << " Type: " << node.type()
1391 << " Target: " << TargetInfo::TargetType
1392 << " Data Type: " << input->info()->data_type()
1393 << " Input shape: " << input->info()->tensor_shape()
1394 << " Output shape: " << output->info()->tensor_shape()
1395 << std::endl);
1396
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001397 return RETURN_UNIQUE_PTR(func);
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001398}
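
// Note (illustrative): the target scale and zero-point are not passed to configure(); they are
// read from the output tensor itself, so the graph-level output descriptor is expected to carry
// them, along these lines (descriptor names are hypothetical):
//
//   TensorDescriptor output_desc = input_desc;
//   output_desc.data_type        = DataType::QASYMM8;
//   output_desc.quant_info       = QuantizationInfo(0.0039f, 0); // scale, zero-point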
1399
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001400/** Create a backend reorg layer function
1401 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001402 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001403 * @tparam TargetInfo Target-specific information
1404 *
1405 * @param[in] node Node to create the backend function for
1406 *
1407 * @return Backend reorg layer function
1408 */
1409template <typename ReorgLayerFunction, typename TargetInfo>
1410std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1411{
1412 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1413
1414 // Extract IO and info
1415 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1416 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1417 ARM_COMPUTE_ERROR_ON(input == nullptr);
1418 ARM_COMPUTE_ERROR_ON(output == nullptr);
1419
1420 // Create and configure function
1421 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1422 func->configure(input, output, node.stride());
1423
1424 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001425 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1426 << node.name()
1427 << " Type: " << node.type()
1428 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001429 << " Data Type: " << input->info()->data_type()
1430 << " Input shape: " << input->info()->tensor_shape()
1431 << " Output shape: " << output->info()->tensor_shape()
1432 << std::endl);
1433
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001434 return RETURN_UNIQUE_PTR(func);
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001435}
1436
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001437/** Create a backend reshape layer function
1438 *
1439 * @tparam ReshapeLayerFunction Backend reshape function
1440 * @tparam TargetInfo Target-specific information
1441 *
1442 * @param[in] node Node to create the backend function for
1443 *
1444 * @return Backend reshape layer function
1445 */
1446template <typename ReshapeLayerFunction, typename TargetInfo>
1447std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1448{
1449 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1450
1451 // Extract IO and info
1452 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1453 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1454 ARM_COMPUTE_ERROR_ON(input == nullptr);
1455 ARM_COMPUTE_ERROR_ON(output == nullptr);
1456
1457 // Create and configure function
1458 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1459 func->configure(input, output);
1460
1461 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001462 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1463 << node.name()
1464 << " Type: " << node.type()
1465 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001466 << " Data Type: " << input->info()->data_type()
1467 << " Input shape: " << input->info()->tensor_shape()
1468 << " Output shape: " << output->info()->tensor_shape()
1469 << std::endl);
1470
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001471 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001472}
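
// The reshape helper above is the simplest instance of the pattern shared by most helpers in
// this file. A new single-input/single-output helper would follow the same recipe (sketch only;
// MyLayerNode and MyLayerFunction are placeholders, not library types):
//
//   template <typename MyLayerFunction, typename TargetInfo>
//   std::unique_ptr<IFunction> create_my_layer(MyLayerNode &node)
//   {
//       validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
//       typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
//       typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
//       auto func = support::cpp14::make_unique<MyLayerFunction>();
//       func->configure(input, output /*, layer-specific info */);
//       return RETURN_UNIQUE_PTR(func);
//   }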
1473
1474/** Create a backend resize layer function
1475 *
1476 * @tparam ResizeLayerFunction Backend resize function
1477 * @tparam TargetInfo Target-specific information
1478 *
1479 * @param[in] node Node to create the backend function for
1480 *
1481 * @return Backend resize layer function
1482 */
1483template <typename ResizeLayerFunction, typename TargetInfo>
1484std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1485{
1486 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1487
1488 // Extract IO and info
1489 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1490 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1491 ARM_COMPUTE_ERROR_ON(input == nullptr);
1492 ARM_COMPUTE_ERROR_ON(output == nullptr);
1493 const InterpolationPolicy policy = node.policy();
1494
1495 // Create and configure function
1496 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
Sang-Hoon Parkccd94962020-06-09 12:09:24 +01001497 func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT });
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001498
1499 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001500 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1501 << node.name()
1502 << " Type: " << node.type()
1503 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001504 << " Data Type: " << input->info()->data_type()
1505 << " Input shape: " << input->info()->tensor_shape()
1506 << " Output shape: " << output->info()->tensor_shape()
1507 << " Interpolation: " << policy
1508 << std::endl);
1509
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001510 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001511}
1512
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001513/** Create a backend ROI align layer function
1514 *
1515 * @tparam ROIAlignLayerFunction ROI Align function
1516 * @tparam TargetInfo Target-specific information
1517 *
1518 * @param[in] node Node to create the backend function for
1519 *
1520 * @return ROI Align layer function
1521 */
1522template <typename ROIAlignLayerFunction, typename TargetInfo>
1523std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1524{
1525 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1526
1527 // Extract IO and info
1528 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1529 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1530 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1531 ARM_COMPUTE_ERROR_ON(input == nullptr);
1532 ARM_COMPUTE_ERROR_ON(output == nullptr);
1533 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1534
1535 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1536
1537 // Create and configure function
1538 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1539
1540 func->configure(input, rois, output, pool_info);
1541
1542 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001543 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1544 << node.name()
1545 << " Type: " << node.type()
1546 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001547 << " Data Type: " << input->info()->data_type()
1548 << " Input shape: " << input->info()->tensor_shape()
1549 << " Output shape: " << output->info()->tensor_shape()
1550 << " ROIs shape: " << rois->info()->tensor_shape()
1551 << " ROIPooling width: " << pool_info.pooled_width()
1552 << " ROIPooling height: " << pool_info.pooled_height()
1553 << std::endl);
1554
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001555 return RETURN_UNIQUE_PTR(func);
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001556}
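
// Illustrative only: the pooling info consumed above is typically built as
//
//   // 7x7 output bins over a feature map at 1/16 of the input image resolution
//   const ROIPoolingLayerInfo pool_info(7U, 7U, 0.0625f);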
1557
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001558/** Create a backend slice layer function
1559 *
1560 * @tparam SliceLayerFunction Backend slice function
1561 * @tparam TargetInfo Target-specific information
1562 *
1563 * @param[in] node Node to create the backend function for
1564 *
1565 * @return Backend slice layer function
1566 */
1567template <typename SliceLayerFunction, typename TargetInfo>
1568std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1569{
1570 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1571
1572 // Extract IO and info
1573 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1574 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1575 ARM_COMPUTE_ERROR_ON(input == nullptr);
1576 ARM_COMPUTE_ERROR_ON(output == nullptr);
1577
1578 // Create and configure function
1579 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1580 func->configure(input, output, node.starts(), node.ends());
1581
1582 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001583 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1584 << node.name()
1585 << " Type: " << node.type()
1586 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001587 << " Data Type: " << input->info()->data_type()
1588 << " Input shape: " << input->info()->tensor_shape()
1589 << " Output shape: " << output->info()->tensor_shape()
1590 << std::endl);
1591
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001592 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001593}
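
// Illustrative only, assuming the usual [start, end) slice semantics: starts/ends are
// Coordinates in the tensor's native dimension order, with exclusive end indices. For example,
// keeping elements 10..73 along dimension 0 and 0..99 along dimension 1 of a 2D tensor:
//
//   const Coordinates starts(10, 0);
//   const Coordinates ends(74, 100);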
1594
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001595/** Create a backend softmax layer function
1596 *
1597 * @tparam SoftmaxLayerFunction Backend softmax function
1598 * @tparam TargetInfo Target-specific information
1599 *
1600 * @param[in] node Node to create the backend function for
1601 * @param[in] ctx Graph context
1602 *
1603 * @return Backend softmax layer function
1604 */
1605template <typename SoftmaxLayerFunction, typename TargetInfo>
1606std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1607{
1608 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1609
1610 // Extract IO and info
1611 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1612 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1613 const float beta = node.beta();
1614 ARM_COMPUTE_ERROR_ON(input == nullptr);
1615 ARM_COMPUTE_ERROR_ON(output == nullptr);
1616
1617 // Create and configure function
1618 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1619 func->configure(input, output, beta);
1620
1621 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001622 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1623 << node.name()
1624 << " Type: " << node.type()
1625 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001626 << " Data Type: " << input->info()->data_type()
1627 << " Input shape: " << input->info()->tensor_shape()
1628 << " Output shape: " << output->info()->tensor_shape()
1629 << std::endl);
1630
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001631 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001632}
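
// Note (illustrative): get_memory_manager() hands back the memory manager registered in the
// GraphContext for this target, so functions with internal scratch tensors (such as softmax)
// can share memory across the whole graph. Whether such a manager is registered is decided
// when the graph is finalized, roughly along these lines:
//
//   GraphConfig config;
//   config.use_function_memory_manager = true; // let backend functions share scratch buffers
//   // ... config is passed at graph finalization time and later surfaces here through `ctx`.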
Michele Di Giorgioec699752019-03-22 15:25:32 +00001633
1634/** Create a backend stack layer function
1635 *
1636 * @tparam StackLayerFunction Backend stack function
1637 * @tparam TargetInfo Target-specific information
1638 *
1639 * @param[in] node Node to create the backend function for
1640 *
1641 * @return Backend stack layer function
1642 */
1643template <typename StackLayerFunction, typename TargetInfo>
1644std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1645{
1646 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1647 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1648
1649 // Extract IO and info
1650 std::vector<typename TargetInfo::TensorType *> inputs;
1651 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1652 {
1653 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1654 }
1655 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1656 const int axis = node.axis();
1657
1658 // Create and configure function
1659 auto func = support::cpp14::make_unique<StackLayerFunction>();
1660 func->configure(inputs, axis, output);
1661
1662 // Log info
1663 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1664 << node.name()
1665 << " Type: " << node.type()
1666 << " Target: " << TargetInfo::TargetType
1667 << " Data Type: " << output->info()->data_type()
1668 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1669 << " Output shape: " << output->info()->tensor_shape()
1670 << " Num Inputs: " << inputs.size()
1671 << " Axis: " << axis
1672 << std::endl);
1673
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001674 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgioec699752019-03-22 15:25:32 +00001675}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001676/** Create a backend Upsample layer function
1677 *
1678 * @tparam UpsampleLayerFunction Backend Upsample function
1679 * @tparam TargetInfo Target-specific information
1680 *
1681 * @param[in] node Node to create the backend function for
1682 * @param[in] ctx Graph context
1683 *
1684 * @return Backend Upsample layer function
1685 */
1686template <typename UpsampleLayerFunction, typename TargetInfo>
1687std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1688{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001689 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001690 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1691
1692 // Extract IO and info
1693 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1694 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1695 const Size2D info = node.info();
1696 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1697 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1698 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1699 ARM_COMPUTE_ERROR_ON(input == nullptr);
1700 ARM_COMPUTE_ERROR_ON(output == nullptr);
1701
1702 // Create and configure function
1703 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1704 func->configure(input, output, info, upsampling_policy);
1705
1706 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001707 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1708 << node.name()
1709 << " Type: " << node.type()
1710 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001711 << " Data Type: " << input->info()->data_type()
1712 << " Input shape: " << input->info()->tensor_shape()
1713 << " Output shape: " << output->info()->tensor_shape()
1714 << " Strides: " << info
1715 << " Upsampling policy: " << upsampling_policy
1716 << std::endl);
1717
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001718 return RETURN_UNIQUE_PTR(func);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001719}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001720/** Create a backend YOLO layer function
1721 *
1722 * @tparam YOLOlayerFunction Backend YOLO function
1723 * @tparam TargetInfo Target-specific information
1724 *
1725 * @param[in] node Node to create the backend function for
1726 * @param[in] ctx Graph context
1727 *
1728 * @return Backend YOLO layer function
1729 */
1730template <typename YOLOlayerFunction, typename TargetInfo>
1731std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1732{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001733 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001734 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1735
1736 // Extract IO and info
1737 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1738 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1739 const ActivationLayerInfo act_info = node.activation_info();
1740 const int32_t num_classes = node.num_classes();
1741 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1742 ARM_COMPUTE_ERROR_ON(input == nullptr);
1743 ARM_COMPUTE_ERROR_ON(output == nullptr);
1744
1745 // Create and configure function
1746 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1747 func->configure(input, output, act_info, num_classes);
1748
1749 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001750 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1751 << node.name()
1752 << " Type: " << node.type()
1753 << " Target: " << TargetInfo::TargetType
Michalis Spyrou96f67692018-09-13 11:39:28 +01001754 << " Data Type: " << input->info()->data_type()
1755 << " Input shape: " << input->info()->tensor_shape()
1756 << " Output shape: " << output->info()->tensor_shape()
1757 << " Activation function: " << act_info.activation()
1758 << " Num classes: " << num_classes
1759 << std::endl);
1760
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001761 return RETURN_UNIQUE_PTR(func);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001762}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001763} // namespace detail
1764} // namespace backends
1765} // namespace graph
1766} // namespace arm_compute
1767
Michalis Spyrouf4643372019-11-29 16:17:13 +00001768#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */