/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/core/experimental/IPostOp.h"
#include "arm_compute/core/experimental/PostOps.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "support/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}
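
// Note: TargetInfo is a backend-supplied traits struct. As a rough, illustrative sketch
// (member names are assumptions modelled on the OpenCL backend, not a definitive definition),
// it is expected to provide at least:
//
//   struct CLTargetInfo
//   {
//       using TensorType    = arm_compute::ICLTensor; // backing tensor type returned above
//       using SrcTensorType = arm_compute::ICLTensor; // input tensor type used by concatenation
//       static Target TargetType;                     // e.g. Target::CL
//   };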

template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = std::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace : " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}
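
// Illustrative only: a backend function factory typically dispatches to this helper,
// roughly as below (CLActivationLayer/CLTargetInfo are assumptions borrowed from the
// OpenCL backend; the exact names depend on the backend in use):
//
//   std::unique_ptr<IFunction> fn =
//       detail::create_activation_layer<CLActivationLayer, CLTargetInfo>(
//           *arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node));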

/** Creates a backend argminmax layer function
 *
 * @tparam ArgMinMaxLayerFunction Backend argminmax function
 * @tparam TargetInfo             Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend argminmax layer function
 */
template <typename ArgMinMaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const ReductionOperation         op     = node.reduction_operation();
    unsigned int                     axis   = node.axis();

    // Create function
    auto func = std::make_unique<ArgMinMaxLayerFunction>();
    func->configure(input, axis, output, op);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Reduction Operation: " << op
                               << " axis: " << axis
                               << std::endl);

    return std::move(func);
}

/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      epsilon   = node.epsilon();
    const ActivationLayerInfo        fused_act = node.fused_activation();

    // Create and configure function
    auto func = std::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return std::move(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = std::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return std::move(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::SrcTensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = std::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return std::move(func);
}

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
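
// For reference, ConvolutionLayerFunctions is a backend-provided bundle of concrete function
// types, one per convolution method. A rough sketch modelled on the OpenCL backend (the names
// below are illustrative assumptions, not a definitive definition):
//
//   struct CLConvolutionLayerFunctions
//   {
//       using GenericConvolutionLayer  = CLConvolutionLayer;
//       using GEMMConvolutionLayer     = CLGEMMConvolutionLayer;
//       using DirectConvolutionLayer   = CLDirectConvolutionLayer;
//       using WinogradConvolutionLayer = CLWinogradConvolutionLayer;
//   };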

/** Create a backend convolution layer function with post operators
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const ActivationLayerInfo fused_act  = node.fused_activation();

    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;

    auto &post_op_info_list = node.post_op_info_list();
    for(const auto &post_op_info : post_op_info_list)
    {
        switch(post_op_info->type())
        {
            case PostOpType::Activation:
            {
                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
                break;
            }
            case PostOpType::Eltwise_Add:
            {
                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported PostOpType");
            }
        }
    }

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    // Fusing convolution with post ops is only supported for 1x1 convolutions, which are implemented only as GEMM-based convolution (gemmconv2d)
    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                    std::string("GEMMConvolutionLayer"), mm,
                                    input, weights, biases, output, conv_info,
                                    WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
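
// Illustrative only: for a fused "conv -> eltwise add -> activation" chain, the loop above
// would build a post-op list roughly equivalent to the following (ITensorType stands for
// TargetInfo::TensorType; the policy, position and activation values are placeholders):
//
//   experimental::PostOpList<ITensorType *> post_ops;
//   post_ops.push_back_op<experimental::PostOpEltwiseAdd<ITensorType *>>(add_input, 0, ConvertPolicy::SATURATE);
//   post_ops.push_back_op<experimental::PostOpAct<ITensorType *>>(
//       ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));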

/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
                                      std::string(), mm,
                                      input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend layer depth-wise convolution function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}

/** Create a backend depth to space layer function
 *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth to space layer function
 */
template <typename DepthToSpaceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DepthToSpaceLayerFunction>();
    func->configure(input, output, node.block_shape());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Block Size: " << node.block_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType    *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType    *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType    *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType    *output0     = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType    *output1     = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType    *output2     = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType    *output3     = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = std::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    const ActivationLayerInfo        act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else if(eltwise_op == EltwiseOperation::Max)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
                                        std::string("ElementwiseMaximum"),
                                        input1, input2, output, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Div)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
                                        std::string("ArithmeticDivision"),
                                        input1, input2, output, act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
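
// For reference, EltwiseFunctions is a backend-provided bundle selecting the concrete
// arithmetic functions used above. A rough, illustrative sketch based on the OpenCL backend
// (the concrete types are assumptions; the required member names are those used in this helper):
//
//   struct CLEltwiseFunctions
//   {
//       using Addition       = CLArithmeticAddition;
//       using Subtraction    = CLArithmeticSubtraction;
//       using Multiplication = CLPixelWiseMultiplication;
//       using Maximum        = CLElementwiseMax;
//       using Division       = CLArithmeticDivision;
//   };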

/** Create a backend unary element-wise operation layer function
 *
 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend unary element-wise operation layer function
 */
template <typename UnaryEltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const UnaryEltwiseOperation      eltwise_op = node.eltwise_descriptor().op;

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
                                        std::string("Exp"),
                                        input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    FullyConnectedLayerInfo          fc_info = node.info();
    fc_info.enable_fast_math = (node.fast_math_hint() == FastMathHint::Enabled);

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
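/* Note: the memory and weights managers passed to the function above are retrieved from the
 * GraphContext, so functions created for the same target can share them; whether and how they
 * are used is up to the concrete backend FullyConnectedLayerFunction. */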

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend L2 normalization layer function
 *
 * @tparam L2NormalizeLayerFunction Backend L2 normalization function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend L2 normalization layer function
 */
template <typename L2NormalizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    int                              axis    = node.axis();
    float                            epsilon = node.epsilon();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
    func->configure(input, output, axis, epsilon);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Axis: " << axis
                               << " Epsilon: " << epsilon
                               << std::endl);

    return std::move(func);
}
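/* For reference: the L2 normalization configured above typically computes, along the selected
 * axis, out = x / sqrt(max(sum(x^2), epsilon)), with epsilon acting as a lower bound that guards
 * against division by zero; the exact semantics are defined by the backend
 * L2NormalizeLayerFunction. */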

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return std::move(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
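/* For reference: the normalization configured above typically computes, per channel,
 * out = (in - mean) / std; the exact semantics are defined by the backend
 * NormalizePlanarYUVLayerFunction. */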

/** Create a backend pad layer function
 *
 * @tparam PadLayerFunction Backend pad function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pad layer function
 */
template <typename PadLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PaddingList               &padding   = node.padding();
    const PixelValue                 pad_value = node.pad_value();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PadLayerFunction>();
    func->configure(input, output, padding, pad_value);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend permute layer function
 *
 * @tparam PermuteLayerFunction Backend permute function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend permute layer function
 */
template <typename PermuteLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const PermutationVector         &perm   = node.permutation_vector();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PermuteLayerFunction>();
    func->configure(input, output, perm);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Permutation vector: " << perm
                               << std::endl);

    return std::move(func);
}

/** Create a backend pooling layer function
 *
 * @tparam PoolingLayerFunction Backend pooling function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pooling layer function
 */
template <typename PoolingLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PoolingLayerInfo           pool_info = node.pooling_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PoolingLayerFunction>();
    func->configure(input, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Pooling info: " << pool_info.pool_type
                               << std::endl);

    return std::move(func);
}

/** Create a backend PRelu layer function
 *
 * @tparam PReluFunction Backend PRelu function
 * @tparam TargetInfo    Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend PRelu layer function
 */
template <typename PReluFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *alpha  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PReluFunction>();
    func->configure(input, alpha, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend print layer function
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend print layer function
 */
template <typename TargetInfo>
std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_UNUSED(input);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << std::endl);

    return nullptr;
}

/** Create a backend priorbox layer function
 *
 * @tparam PriorBoxLayerFunction Backend priorbox function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend priorbox layer function
 */
template <typename PriorBoxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1     = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const PriorBoxLayerInfo          prior_info = node.priorbox_info();
    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<PriorBoxLayerFunction>();
    func->configure(input0, input1, output, prior_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " PriorBoxLayer info: " << prior_info
                               << std::endl);

    return std::move(func);
}

/** Create a backend quantization layer function
 *
 * @tparam QuantizationLayerFunction Backend quantization function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend quantization layer function
 */
template <typename QuantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<QuantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend reduction operation layer function
 *
 * @tparam ReductionOperationFunction Backend reduction operation function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend reduction operation layer function
 */
template <typename ReductionOperationFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    ReductionOperation               op        = node.op();
    int                              axis      = node.axis();
    bool                             keep_dims = node.keep_dims();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, output, axis, op, keep_dims);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Operation: " << op
                               << " Axis: " << axis
                               << " Keep dimensions: " << keep_dims
                               << std::endl);

    return std::move(func);
}
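/* For reference: keep_dims controls the output rank of the reduction configured above; when true
 * the reduced axis is kept with size 1, when false it is removed from the output shape. */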

/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend reshape layer function
 *
 * @tparam ReshapeLayerFunction Backend reshape function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reshape layer function
 */
template <typename ReshapeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReshapeLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend resize layer function
 *
 * @tparam ResizeLayerFunction Backend resize function
 * @tparam TargetInfo          Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend resize layer function
 */
template <typename ResizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    const InterpolationPolicy policy = node.policy();

    // Create and configure function
    auto func = std::make_unique<ResizeLayerFunction>();
    func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Interpolation: " << policy
                               << std::endl);

    return std::move(func);
}

/** Create a backend ROI align layer function
 *
 * @tparam ROIAlignLayerFunction ROI Align function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return ROI Align layer function
 */
template <typename ROIAlignLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *rois   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    ARM_COMPUTE_ERROR_ON(rois == nullptr);

    const ROIPoolingLayerInfo pool_info = node.pooling_info();

    // Create and configure function
    auto func = std::make_unique<ROIAlignLayerFunction>();

    func->configure(input, rois, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " ROIs shape: " << rois->info()->tensor_shape()
                               << " ROIPooling width: " << pool_info.pooled_width()
                               << " ROIPooling height: " << pool_info.pooled_height()
                               << std::endl);

    return std::move(func);
}

/** Create a backend slice layer function
 *
 * @tparam SliceLayerFunction Backend slice function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend slice layer function
 */
template <typename SliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<SliceLayerFunction>();
    func->configure(input, output, node.starts(), node.ends());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend softmax layer function
 *
 * @tparam SoftmaxLayerFunction Backend softmax function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend softmax layer function
 */
template <typename SoftmaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      beta   = node.beta();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, output, beta);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
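/* For reference: beta scales the input before the exponentiation in the softmax configured above,
 * i.e. softmax(x)_i = exp(beta * x_i) / sum_j exp(beta * x_j), so beta = 1.f gives the standard
 * softmax. */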

/** Create a backend stack layer function
 *
 * @tparam StackLayerFunction Backend stack function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend stack layer function
 */
template <typename StackLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const int                        axis   = node.axis();

    // Create and configure function
    auto func = std::make_unique<StackLayerFunction>();
    func->configure(inputs, axis, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Inputs shape: " << inputs[0]->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << axis
                               << std::endl);

    return std::move(func);
}

/** Create a backend strided slice layer function
 *
 * @tparam StridedSliceLayerFunction Backend strided slice function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend strided slice layer function
 */
template <typename StridedSliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    Coordinates                      starts  = node.starts();
    Coordinates                      ends    = node.ends();
    BiStrides                        strides = node.strides();
    StridedSliceLayerInfo            info    = node.strided_slice_info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<StridedSliceLayerFunction>();
    func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
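/* For reference: the masks forwarded above follow the usual strided-slice convention: if bit i of
 * begin_mask/end_mask is set, starts[i]/ends[i] is ignored and the fullest possible range is used
 * for dimension i; if bit i of shrink_axis_mask is set, dimension i is reduced to a single element
 * and dropped from the output shape. */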
} // namespace detail
} // namespace backends
} // namespace graph
} // namespace arm_compute

#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */