blob: a7e52d4d6d661ba394f333386ceb41f606ac0d6e [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
Alessandro Navone6413e492021-02-02 11:39:05 +00002 * Copyright (c) 2018-2021 Arm Limited.
Georgios Pinitasda2491f2018-06-01 17:49:09 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
25#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
Georgios Pinitasda2491f2018-06-01 17:49:09 +010026
SiCongLi31778612021-11-12 17:33:45 +000027#include "arm_compute/core/experimental/IPostOp.h"
28#include "arm_compute/core/experimental/PostOps.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010029#include "arm_compute/graph/Logger.h"
30#include "arm_compute/graph/Tensor.h"
31#include "arm_compute/graph/TypePrinter.h"
32#include "arm_compute/graph/Types.h"
Georgios Pinitas9e4824c2019-04-12 13:15:58 +010033#include "arm_compute/graph/Utils.h"
giuros01acce5042019-02-21 17:32:34 +000034#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
ramelg01b75d6242021-11-26 19:12:40 +000035#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h"
Manuel Bottinibffb41e2019-06-20 16:00:27 +010036#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010037#include "arm_compute/graph/backends/Utils.h"
38#include "arm_compute/graph/nodes/Nodes.h"
39
40#include "arm_compute/core/Error.h"
41#include "arm_compute/core/Helpers.h"
42#include "arm_compute/core/ITensorInfo.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010043#include "support/Cast.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010044
45namespace arm_compute
46{
47namespace graph
48{
49namespace backends
50{
51namespace detail
52{
53/** Returns backing tensor of a given tensor
54 *
55 * @tparam TargetInfo Target information
56 *
57 * @param[in] tensor Tensor to extract the backing tensor from
58 *
59 * @return Backing tensor if present else nullptr
60 */
61template <typename TargetInfo>
62typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
63{
64 typename TargetInfo::TensorType *backing_tensor = nullptr;
65 if(tensor != nullptr)
66 {
67 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
68 // Get backing tensor handle
69 ITensorHandle *tensor_handle = tensor->handle();
70 // Get backing tensor
71 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
72 }
73
74 return backing_tensor;
75}
76
77template <typename TargetInfo>
78void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
79{
80 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
Pablo Tello32521432018-11-15 14:43:10 +000081 << " Target: " << TargetInfo::TargetType
82 << " ID: " << node.id()
83 << node.name()
Georgios Pinitasda2491f2018-06-01 17:49:09 +010084 << std::endl);
85
86 ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
87 ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
88 ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
Michalis Spyrou6bff1952019-10-02 17:22:11 +010089 ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
Georgios Pinitasda2491f2018-06-01 17:49:09 +010090}
91
92/** Creates a backend activation layer function
93 *
94 * @tparam ActivationLayerFunction Backend activation function
95 * @tparam TargetInfo Target-specific information
96 *
97 * @param[in] node Node to create the backend function for
98 *
99 * @return Backend activation layer function
100 */
101template <typename ActivationLayerFunction, typename TargetInfo>
102std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
103{
104 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
105
106 // Extract IO and info
107 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
108 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
109 const ActivationLayerInfo act_info = node.activation_info();
110
111 // Create function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000112 auto func = std::make_unique<ActivationLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100113 func->configure(input, output, act_info);
114
Pablo Tello32521432018-11-15 14:43:10 +0000115 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
116 << node.name()
117 << " Type: " << node.type()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000118 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100119 << " Data Type: " << input->info()->data_type()
120 << " Shape: " << input->info()->tensor_shape()
121 << " Activation function: " << act_info.activation()
122 << " a: " << act_info.a()
123 << " b: " << act_info.b()
124 << " InPlace : " << is_in_place_operation(input, output)
125 << std::endl);
126
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100127 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100128}
129
thecha01e8f05da2020-08-24 17:21:41 +0100130/** Creates a backend argminmax layer function
131 *
132 * @tparam ArgMinMaxLayerFunction Backend activation function
133 * @tparam TargetInfo Target-specific information
134 *
135 * @param[in] node Node to create the backend function for
136 *
137 * @return Backend argminmax layer function
138 */
139template <typename ArgMinMaxLayerFunction, typename TargetInfo>
140std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
141{
142 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
143
144 // Extract IO and info
145 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
146 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
147 const ReductionOperation op = node.reduction_operation();
148 unsigned int axis = node.axis();
149
150 // Create function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000151 auto func = std::make_unique<ArgMinMaxLayerFunction>();
thecha01e8f05da2020-08-24 17:21:41 +0100152 func->configure(input, axis, output, op);
153
154 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
155 << node.name()
156 << " Type: " << node.type()
157 << " Target: " << TargetInfo::TargetType
158 << " Data Type: " << input->info()->data_type()
159 << " Shape: " << input->info()->tensor_shape()
160 << " Reduction Operation: " << op
161 << " axis: " << axis
162 << std::endl);
163
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100164 return std::move(func);
thecha01e8f05da2020-08-24 17:21:41 +0100165}
166
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100167/** Create a backend batch normalization layer function
168 *
169 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
170 * @tparam TargetInfo Target-specific information
171 *
172 * @param[in] node Node to create the backend function for
173 *
174 * @return Backend batch normalization layer function
175 */
176template <typename BatchNormalizationLayerFunction, typename TargetInfo>
177std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
178{
179 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
180
181 // Extract IO and info
giuros01acce5042019-02-21 17:32:34 +0000182 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
183 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
184 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
185 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
186 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
187
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100188 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
189 const float epsilon = node.epsilon();
190 const ActivationLayerInfo fused_act = node.fused_activation();
191
192 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000193 auto func = std::make_unique<BatchNormalizationLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100194 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
195
196 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000197 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
198 << node.name()
199 << " Type: " << node.type()
200 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100201 << " Data Type: " << input->info()->data_type()
202 << " Shape: " << input->info()->tensor_shape()
203 << " Epsilon: " << epsilon << " "
204 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
Pablo Tello32521432018-11-15 14:43:10 +0000205 << " InPlace: " << is_in_place_operation(input, output)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100206 << std::endl);
207
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100208 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100209}
210
giuros01acce5042019-02-21 17:32:34 +0000211/** Create a backend batch normalization layer function
212 *
213 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
214 * @tparam TargetInfo Target-specific information
215 *
216 * @param[in] node Node to create the backend function for
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000217 * @param[in] ctx Graph context
giuros01acce5042019-02-21 17:32:34 +0000218 *
219 * @return Backend batch normalization layer function
220 */
221template <typename FusedLayerTypes, typename TargetInfo>
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000222std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
giuros01acce5042019-02-21 17:32:34 +0000223{
224 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
225
226 // Extract IO and info
227 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
228 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
229 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
230 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
231 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
232 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
233 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
234
235 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
236
237 const PadStrideInfo conv_info = node.convolution_info();
238 const unsigned int num_groups = node.num_groups();
239 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
240 const ActivationLayerInfo fused_act = node.fused_activation();
241 const float epsilon = node.epsilon();
242
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000243 // Create and configure function (we assume that functions have been validated before creation)
244 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
245 std::unique_ptr<IFunction> func;
246 std::string func_name;
247
248 using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
249
giuros01acce5042019-02-21 17:32:34 +0000250 // Create and configure function
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000251 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
252 std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
giuros01acce5042019-02-21 17:32:34 +0000253
254 // Log info
255 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
256 << node.name()
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100257 << " Type: " << node.type()
258 << " Target: " << TargetInfo::TargetType
259 << " Data Type: " << input->info()->data_type()
260 << " Input shape: " << input->info()->tensor_shape()
261 << " Weights shape: " << weights->info()->tensor_shape()
262 << " Output shape: " << output->info()->tensor_shape()
263 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
264 << std::endl);
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100265 return std::move(func);
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100266}
267
268/** Create a backend fused depthwise convolution batch normalization layer function
269 *
270 * @tparam FusedLayerTypes Fused layer types
271 * @tparam TargetInfo Target-specific information
272 *
273 * @param[in] node Node to create the backend function for
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000274 * @param[in] ctx Graph context
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100275 *
276 * @return Backend fused depthwise convolution batch normalization layer function
277 */
278template <typename FusedLayerTypes, typename TargetInfo>
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000279std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100280{
281 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
282
283 // Extract IO and info
284 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
285 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
286 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
287 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
288 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
289 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
290 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
291
292 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
293
294 const PadStrideInfo conv_info = node.convolution_info();
295 const unsigned int depth_multiplier = node.depth_multiplier();
296 const ActivationLayerInfo fused_act = node.fused_activation();
297 const float epsilon = node.epsilon();
298
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000299 // Create and configure function (we assume that functions have been validated before creation)
300 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
301 std::unique_ptr<IFunction> func;
302 std::string func_name;
303
304 using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
305
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100306 // Create and configure function
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000307 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
308 std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100309
310 // Log info
311 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
312 << node.name()
313 << " Type: " << node.type()
giuros01acce5042019-02-21 17:32:34 +0000314 << " Target: " << TargetInfo::TargetType
315 << " Data Type: " << input->info()->data_type()
316 << " Input shape: " << input->info()->tensor_shape()
317 << " Weights shape: " << weights->info()->tensor_shape()
318 << " Output shape: " << output->info()->tensor_shape()
319 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
320 << std::endl);
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100321 return std::move(func);
giuros01acce5042019-02-21 17:32:34 +0000322}
323
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100324/** Create a backend bounding box transform layer function
325 *
326 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
327 * @tparam TargetInfo Target-specific information
328 *
329 * @param[in] node Node to create the backend function for
330 *
331 * @return Backend bounding box transform layer function
332 */
333template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
334std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
335{
336 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
337
338 // Extract IO and info
339 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
340 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
341 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
342 const BoundingBoxTransformInfo bbox_info = node.info();
343
344 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000345 auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100346 func->configure(input, output, deltas, bbox_info);
347
348 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000349 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
350 << node.name()
351 << " Type: " << node.type()
352 << " Target: " << TargetInfo::TargetType
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100353 << " Data Type: " << input->info()->data_type()
354 << " Shape: " << input->info()->tensor_shape()
355 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
356 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
357 << std::endl);
358
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100359 return std::move(func);
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100360}
361
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100362/** Create a backend channel shuffle layer function
363 *
364 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
365 * @tparam TargetInfo Target-specific information
366 *
367 * @param[in] node Node to create the backend function for
368 *
369 * @return Backend channel shuffle layer function
370 */
371template <typename ChannelShuffleLayerFunction, typename TargetInfo>
372std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
373{
374 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
375
376 // Extract IO and info
377 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
378 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
379 const unsigned int num_groups = node.num_groups();
380
381 // Create function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000382 auto func = std::make_unique<ChannelShuffleLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100383 func->configure(input, output, num_groups);
384
Pablo Tello32521432018-11-15 14:43:10 +0000385 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
386 << node.name()
387 << " Type: " << node.type()
388 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100389 << " Data Type: " << input->info()->data_type()
390 << " Shape: " << input->info()->tensor_shape()
391 << " Num groups: " << num_groups
392 << std::endl);
393
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100394 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100395}
396
Georgios Pinitase2220552018-07-20 13:23:44 +0100397/** Create a backend layer concatenate function
398 *
399 * @tparam ConcatenateLayerFunction Backend concatenate function
400 * @tparam TargetInfo Target-specific information
401 *
402 * @param[in] node Node to create the backend function for
403 *
404 * @return Backend concatenate layer function
405 */
406template <typename ConcatenateLayerFunction, typename TargetInfo>
407std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
408{
409 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
410 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
411
412 // Return nullptr if depth concatenate is switched off
413 if(!node.is_enabled())
414 {
415 return nullptr;
416 }
417
418 // Extract IO and info
Georgios Pinitas4667ddd2020-07-13 21:21:33 +0100419 std::vector<typename TargetInfo::SrcTensorType *> inputs;
Georgios Pinitase2220552018-07-20 13:23:44 +0100420 for(unsigned int i = 0; i < node.num_inputs(); ++i)
421 {
422 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
423 }
424 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas9e4824c2019-04-12 13:15:58 +0100425 const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
426 const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
Georgios Pinitase2220552018-07-20 13:23:44 +0100427
428 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000429 auto func = std::make_unique<ConcatenateLayerFunction>();
Georgios Pinitase2220552018-07-20 13:23:44 +0100430 func->configure(inputs, output, concat_axis);
431
432 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000433 const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
434 std::ostringstream qss;
435 if(is_quantized)
436 {
437 qss << " Output QuantInfo: " << output->info()->quantization_info();
438 }
Pablo Tello32521432018-11-15 14:43:10 +0000439 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
440 << node.name()
441 << " Type: " << node.type()
442 << " Target: " << TargetInfo::TargetType
Georgios Pinitase2220552018-07-20 13:23:44 +0100443 << " Data Type: " << output->info()->data_type()
444 << " Shape: " << output->info()->tensor_shape()
445 << " Num Inputs: " << inputs.size()
446 << " Axis: " << concat_axis
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000447 << qss.str()
Georgios Pinitase2220552018-07-20 13:23:44 +0100448 << std::endl);
449
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100450 return std::move(func);
Georgios Pinitase2220552018-07-20 13:23:44 +0100451}
452
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100453/** Create a backend convolution layer function
454 *
455 * @tparam ConvolutionLayerFunctions Backend convolution functions
Sheri Zhangfb228032021-11-02 10:45:07 +0000456 * @tparam TargetInfo Target-specific information
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100457 *
458 * @param[in] node Node to create the backend function for
459 * @param[in] ctx Graph context
460 *
461 * @return Backend convolution layer function
462 */
463template <typename ConvolutionLayerFunctions, typename TargetInfo>
464std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
465{
466 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
467
468 // Extract IO and info
469 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
470 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
471 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
472 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
473
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100474 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
475
476 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100477 {
478 biases->info()->set_data_type(DataType::S32);
479 }
480
Georgios Pinitas08346e92018-10-16 19:10:46 +0100481 const PadStrideInfo conv_info = node.convolution_info();
482 const unsigned int num_groups = node.num_groups();
483 const ConvolutionMethod conv_algorithm = node.convolution_method();
484 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
485 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100486
487 // Create and configure function (we assume that functions have been validated before creation)
488 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
489 std::unique_ptr<IFunction> func;
490 std::string func_name;
491
Georgios Pinitase2220552018-07-20 13:23:44 +0100492 if(conv_algorithm == ConvolutionMethod::Winograd)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100493 {
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100494 ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100495 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
496 std::string("WinogradConvolutionLayer"), mm,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100497 input, weights, biases, output, conv_info, fused_act, fast_math);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100498 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100499 else if(conv_algorithm == ConvolutionMethod::Direct)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100500 {
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100501 ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100502 std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
503 std::string("DirectConvolutionLayer"),
Georgios Pinitas08346e92018-10-16 19:10:46 +0100504 input, weights, biases, output, conv_info, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100505 }
506 else if(conv_algorithm == ConvolutionMethod::GEMM)
507 {
508 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
509 std::string("GEMMConvolutionLayer"), mm,
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100510 input, weights, biases, output, conv_info,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100511 WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100512 }
513 else
514 {
515 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
516 std::string("GenericConvolutionLayer"), mm,
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100517 input, weights, biases, output, conv_info,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100518 WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100519 }
520
521 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100522 std::ostringstream qss;
523 if(is_quantized)
524 {
525 qss << " Input QuantInfo: " << input->info()->quantization_info()
526 << " Weights QuantInfo: " << weights->info()->quantization_info()
527 << " Output QuantInfo: " << output->info()->quantization_info();
528 }
Pablo Tello32521432018-11-15 14:43:10 +0000529 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
530 << node.name()
531 << " Type: " << func_name
532 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100533 << " Data Type: " << input->info()->data_type()
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100534 << " Groups: " << num_groups
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100535 << " Input shape: " << input->info()->tensor_shape()
536 << " Weights shape: " << weights->info()->tensor_shape()
537 << " Output shape: " << output->info()->tensor_shape()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000538 << qss.str()
Georgios Pinitas08346e92018-10-16 19:10:46 +0100539 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100540 << std::endl);
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100541 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100542}
543
ramelg01b75d6242021-11-26 19:12:40 +0000544/** Create a backend convolution layer function with post operator
Sheri Zhangfb228032021-11-02 10:45:07 +0000545 *
546 * @tparam ConvolutionLayerFunctions Backend convolution functions
547 * @tparam TargetInfo Target-specific information
548 *
549 * @param[in] node Node to create the backend function for
550 * @param[in] ctx Graph context
551 *
552 * @return Backend convolution layer function
553 */
554template <typename ConvolutionLayerFunctions, typename TargetInfo>
555std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
556{
557 validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);
558
559 // Extract IO and info
560 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
561 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
562 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
563 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
564
565 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
566
567 if(is_quantized)
568 {
569 biases->info()->set_data_type(DataType::S32);
570 }
571
572 const PadStrideInfo conv_info = node.convolution_info();
573 const unsigned int num_groups = node.num_groups();
574 const ActivationLayerInfo fused_act = node.fused_activation();
575
576 experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
577
578 auto &post_op_info_list = node.post_op_info_list();
579 for(const auto &post_op_info : post_op_info_list)
580 {
581 switch(post_op_info->type())
582 {
583 case PostOpType::Activation:
584 {
585 const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
586 post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
587 break;
588 }
589 case PostOpType::Eltwise_Add:
590 {
591 typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
592 const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
593 post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
594 break;
595 }
596 default:
597 {
598 ARM_COMPUTE_ERROR("Unsupported PostOpType");
599 }
600 }
601 }
602
603 // Create and configure function (we assume that functions have been validated before creation)
604 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
605 std::unique_ptr<IFunction> func;
606 std::string func_name;
607
Sheri Zhangc65023e2021-11-03 21:24:00 +0000608 // Fuse convolution with post ops is only supported for conv1x1, which is only implemented as gemmconv2d
Sheri Zhangfb228032021-11-02 10:45:07 +0000609 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
610 std::string("GEMMConvolutionLayer"), mm,
611 input, weights, biases, output, conv_info,
612 WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);
613
614 // Log info
615 std::ostringstream qss;
616 if(is_quantized)
617 {
618 qss << " Input QuantInfo: " << input->info()->quantization_info()
619 << " Weights QuantInfo: " << weights->info()->quantization_info()
620 << " Output QuantInfo: " << output->info()->quantization_info();
621 }
622 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
623 << node.name()
624 << " Type: " << func_name
625 << " Target: " << TargetInfo::TargetType
626 << " Data Type: " << input->info()->data_type()
627 << " Groups: " << num_groups
628 << " Input shape: " << input->info()->tensor_shape()
629 << " Weights shape: " << weights->info()->tensor_shape()
630 << " Output shape: " << output->info()->tensor_shape()
631 << qss.str()
632 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
ramelg01b75d6242021-11-26 19:12:40 +0000633 << " Post ops" << post_ops;
634 << std::endl);
635 return std::move(func);
636}
637
638/** Create a backend convolution batch normalization layer function with post operator
639 *
640 * @tparam FusedLayerTypes Backend convolution functions
641 * @tparam TargetInfo Target-specific information
642 *
643 * @param[in] node Node to create the backend function for
644 * @param[in] ctx Graph context
645 *
646 * @return Backend fused convolution with batch normalization layer function
647 */
648template <typename FusedLayerTypes, typename TargetInfo>
649std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_with_post_op(FusedConvolutionBatchNormalizationWithPostOpsNode &node, GraphContext &ctx)
650{
651 validate_node<TargetInfo>(node, 8 /* expected inputs */, 1 /* expected outputs */);
652
653 // Extract IO and info
654 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
655 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
656 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
657 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
658 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
659 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
660 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
661
662 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
663
664 const PadStrideInfo conv_info = node.convolution_info();
665 const unsigned int num_groups = node.num_groups();
666 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
667 const float epsilon = node.epsilon();
668
669 experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
670
671 auto &post_op_info_list = node.post_op_info_list();
672 for(const auto &post_op_info : post_op_info_list)
673 {
674 switch(post_op_info->type())
675 {
676 case PostOpType::Activation:
677 {
678 const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
679 post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
680 break;
681 }
682 case PostOpType::Eltwise_Add:
683 {
684 typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
685 const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
686 post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
687 break;
688 }
689 default:
690 {
691 ARM_COMPUTE_ERROR("Unsupported PostOpType");
692 }
693 }
694 }
695
696 // Create and configure function (we assume that functions have been validated before creation)
697 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
698 std::unique_ptr<IFunction> func;
699 std::string func_name;
700
701 using FType = FusedConvolutionBatchNormalizationWithPostOpsFunction<TargetInfo, FusedLayerTypes>;
702
703 // Create and configure function
704 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
705 std::string("FusedConvolutionBatchNormalizationLayerWithPostOpsLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, post_ops);
706
707 // Log info
708 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
709 << node.name()
710 << " Type: " << node.type()
711 << " Target: " << TargetInfo::TargetType
712 << " Data Type: " << input->info()->data_type()
713 << " Input shape: " << input->info()->tensor_shape()
714 << " Weights shape: " << weights->info()->tensor_shape()
715 << " Output shape: " << output->info()->tensor_shape()
716 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
717 << " Post Ops:" << post_ops;
Sheri Zhangfb228032021-11-02 10:45:07 +0000718 << std::endl);
719 return std::move(func);
720}
721
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100722/** Create a backend deconvolution layer function
723 *
724 * @tparam DeconvolutionLayerFunction Backend deconvolution function
725 * @tparam TargetInfo Target-specific information
726 *
727 * @param[in] node Node to create the backend function for
728 * @param[in] ctx Graph context
729 *
730 * @return Backend deconvolution layer function
731 */
732template <typename DeconvolutionLayerFunction, typename TargetInfo>
733std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
734{
735 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
736
737 // Extract IO and info
738 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
739 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
740 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
741 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
742
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100743 const PadStrideInfo deconv_info = node.deconvolution_info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100744
745 // Create and configure function (we assume that functions have been validated before creation)
746 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
747 std::unique_ptr<IFunction> func;
748
749 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
750 std::string(), mm,
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100751 input, weights, biases, output, deconv_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100752
753 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000754 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
755 << node.name()
756 << " Type: " << node.type()
757 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100758 << " Data Type: " << input->info()->data_type()
759 << " Input shape: " << input->info()->tensor_shape()
760 << " Weights shape: " << weights->info()->tensor_shape()
761 << " Output shape: " << output->info()->tensor_shape()
762 << std::endl);
763 return func;
764}
765
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100766/** Create a backend layer depth-wise convolution function
767 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
769 * @tparam TargetInfo Target-specific information
770 *
771 * @param[in] node Node to create the backend function for
772 *
773 * @return Backend depth-wise convolution layer function
774 */
Manuel Bottini05069f02019-09-26 17:18:26 +0100775template <typename DepthwiseConvolutionLayer, typename TargetInfo>
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100776std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
777{
778 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
779
780 // Extract IO and info
781 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
782 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
783 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
784 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
785
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100786 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
787
788 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100789 {
790 biases->info()->set_data_type(DataType::S32);
791 }
792
Manuel Bottini05069f02019-09-26 17:18:26 +0100793 const PadStrideInfo conv_info = node.convolution_info();
794 const unsigned int depth_multiplier = node.depth_multiplier();
795 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100796
797 // Create and configure function (we assume that functions have been validated before creation)
798 std::unique_ptr<IFunction> func;
799 std::string func_name;
Manuel Bottini05069f02019-09-26 17:18:26 +0100800
801 std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
802 std::string("DepthwiseConvolutionLayer"),
803 input, weights, biases, output, conv_info, depth_multiplier, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100804
805 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100806 std::ostringstream qss;
807 if(is_quantized)
808 {
809 qss << " Input QuantInfo: " << input->info()->quantization_info()
810 << " Weights QuantInfo: " << weights->info()->quantization_info()
811 << " Output QuantInfo: " << output->info()->quantization_info();
812 }
Pablo Tello32521432018-11-15 14:43:10 +0000813 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
814 << node.name()
815 << " Type: " << func_name
816 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100817 << " Data Type: " << input->info()->data_type()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100818 << " Input shape: " << input->info()->tensor_shape()
819 << " Weights shape: " << weights->info()->tensor_shape()
820 << " Output shape: " << output->info()->tensor_shape()
Georgios Pinitas05045c12018-12-07 18:31:47 +0000821 << " Depth multiplier: " << depth_multiplier
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000822 << qss.str()
Georgios Pinitas60e98252018-10-22 16:17:20 +0100823 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100824 << std::endl);
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100825 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100826}
827
thecha010a05e6a2020-08-28 18:40:38 +0100828/** Create a backend depth to space layer function
829 *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
831 * @tparam TargetInfo Target-specific information
832 *
833 * @param[in] node Node to create the backend function for
834 *
835 * @return Backend depth to space layer function
836 */
837template <typename DepthToSpaceLayerFunction, typename TargetInfo>
838std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
839{
840 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
841
842 // Extract IO and info
843 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
844 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
845
846 ARM_COMPUTE_ERROR_ON(input == nullptr);
847 ARM_COMPUTE_ERROR_ON(output == nullptr);
848
849 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000850 auto func = std::make_unique<DepthToSpaceLayerFunction>();
thecha010a05e6a2020-08-28 18:40:38 +0100851 func->configure(input, output, node.block_shape());
852
853 // Log info
854 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
855 << node.name()
856 << " Type: " << node.type()
857 << " Target: " << TargetInfo::TargetType
858 << " Data Type: " << input->info()->data_type()
859 << " Input shape: " << input->info()->tensor_shape()
860 << " Block Size: " << node.block_shape()
861 << " Output shape: " << output->info()->tensor_shape()
862 << std::endl);
863
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100864 return std::move(func);
thecha010a05e6a2020-08-28 18:40:38 +0100865}
866
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000867/** Create a backend dequantize layer function
868 *
 * @tparam DequantizationLayerFunction Backend dequantize function
870 * @tparam TargetInfo Target-specific information
871 *
872 * @param[in] node Node to create the backend function for
873 *
874 * @return Backend dequantize layer function
875 */
876template <typename DequantizationLayerFunction, typename TargetInfo>
877std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
878{
879 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
880
881 // Extract IO and info
882 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
883 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
884
885 ARM_COMPUTE_ERROR_ON(input == nullptr);
886 ARM_COMPUTE_ERROR_ON(output == nullptr);
887
888 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000889 auto func = std::make_unique<DequantizationLayerFunction>();
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000890 func->configure(input, output);
891
892 // Log info
893 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
894 << node.name()
895 << " Type: " << node.type()
896 << " Target: " << TargetInfo::TargetType
897 << " Data Type: " << input->info()->data_type()
898 << " Input shape: " << input->info()->tensor_shape()
899 << " Input quantization info: " << output->info()->quantization_info()
900 << " Output shape: " << output->info()->tensor_shape()
901 << std::endl);
902
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100903 return std::move(func);
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000904}
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000905/** Create a backend detection output layer function
906 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
908 * @tparam TargetInfo Target-specific information
909 *
910 * @param[in] node Node to create the backend function for
911 *
912 * @return Backend detection output layer function
913 */
914template <typename DetectionOutputLayerFunction, typename TargetInfo>
915std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
916{
917 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
918
919 // Extract IO and info
920 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
921 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
922 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
923 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
924 const DetectionOutputLayerInfo detect_info = node.detection_output_info();
925
926 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
927 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
928 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
929 ARM_COMPUTE_ERROR_ON(output == nullptr);
930
931 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000932 auto func = std::make_unique<DetectionOutputLayerFunction>();
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000933 func->configure(input0, input1, input2, output, detect_info);
934
935 // Log info
936 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
937 << node.name()
938 << " Type: " << node.type()
939 << " Target: " << TargetInfo::TargetType
940 << " Data Type: " << input0->info()->data_type()
941 << " Input0 shape: " << input0->info()->tensor_shape()
942 << " Input1 shape: " << input1->info()->tensor_shape()
943 << " Input2 shape: " << input2->info()->tensor_shape()
944 << " Output shape: " << output->info()->tensor_shape()
945 << " DetectionOutputLayer info: " << detect_info
946 << std::endl);
947
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100948 return std::move(func);
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000949}
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000950
951/** Create a backend detection post process layer function
952 *
953 * @tparam DetectionPostProcessLayerFunction Backend detection output function
954 * @tparam TargetInfo Target-specific information
955 *
956 * @param[in] node Node to create the backend function for
957 *
958 * @return Backend detection post process layer function
959 */
960template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
961std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
962{
963 validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
964
965 // Extract IO and info
966 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
967 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
968 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
969 typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
970 typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
971 typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
972 typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
973 const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
974
975 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
976 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
977 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
978 ARM_COMPUTE_ERROR_ON(output0 == nullptr);
979 ARM_COMPUTE_ERROR_ON(output1 == nullptr);
980 ARM_COMPUTE_ERROR_ON(output2 == nullptr);
981 ARM_COMPUTE_ERROR_ON(output3 == nullptr);
982
983 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000984 auto func = std::make_unique<DetectionPostProcessLayerFunction>();
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000985 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
986
987 // Log info
988 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
989 << node.name()
990 << " Type: " << node.type()
991 << " Target: " << TargetInfo::TargetType
992 << " Data Type: " << input0->info()->data_type()
993 << " Input0 shape: " << input0->info()->tensor_shape()
994 << " Input1 shape: " << input1->info()->tensor_shape()
995 << " Input2 shape: " << input2->info()->tensor_shape()
996 << " Output0 shape: " << output0->info()->tensor_shape()
997 << " Output1 shape: " << output1->info()->tensor_shape()
998 << " Output2 shape: " << output2->info()->tensor_shape()
999 << " Output3 shape: " << output3->info()->tensor_shape()
1000 << " DetectionPostProcessLayer info: " << detect_info
1001 << std::endl);
1002
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001003 return std::move(func);
Isabella Gottardia7acb3c2019-01-08 13:48:44 +00001004}
1005
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001006/** Create a backend element-wise operation layer function
1007 *
1008 * @tparam EltwiseFunctions Backend element-wise function
1009 * @tparam TargetInfo Target-specific information
1010 *
1011 * @param[in] node Node to create the backend function for
1012 *
1013 * @return Backend element-wise operation layer function
1014 */
1015template <typename EltwiseFunctions, typename TargetInfo>
1016std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
1017{
1018 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1019
1020 // Extract IO and info
1021 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
1022 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
1023 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1024 const EltwiseOperation eltwise_op = node.eltwise_operation();
1025 const ConvertPolicy convert_policy = node.convert_policy();
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001026 const ActivationLayerInfo act_info = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001027 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1028 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
1029 ARM_COMPUTE_ERROR_ON(output == nullptr);
1030
1031 std::unique_ptr<IFunction> func = nullptr;
1032 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +01001033 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001034 {
1035 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
1036 std::string("ArithmeticAddition"),
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001037 input1, input2, output, convert_policy, act_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001038 }
Georgios Pinitase2220552018-07-20 13:23:44 +01001039 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001040 {
1041 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
1042 std::string("ArithmeticSubtraction"),
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001043 input1, input2, output, convert_policy, act_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001044 }
Georgios Pinitase2220552018-07-20 13:23:44 +01001045 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001046 {
1047 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
1048 std::string("PixelWiseMultiplication"),
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001049 input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001050 }
thecha01f8e35842020-07-28 17:28:17 +01001051 else if(eltwise_op == EltwiseOperation::Max)
1052 {
1053 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
1054 std::string("ElementwiseMaximum"),
1055 input1, input2, output, act_info);
1056 }
Alessandro Navone6413e492021-02-02 11:39:05 +00001057 else if(eltwise_op == EltwiseOperation::Div)
1058 {
1059 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
1060 std::string("ArithmeticDivision"),
1061 input1, input2, output, act_info);
1062 }
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001063 else
1064 {
1065 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
1066 }
1067
1068 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001069 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1070 << node.name()
1071 << " Type: " << node.type()
1072 << " Target: " << TargetInfo::TargetType
1073 << " Operation: " << func_name
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001074 << " Data Type: " << input1->info()->data_type()
Pablo Tello32521432018-11-15 14:43:10 +00001075 << " Shape: " << input1->info()->tensor_shape()
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001076 << std::endl);
1077
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001078 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001079}
1080
Sheri Zhang16dddd22020-05-27 15:03:48 +01001081/** Create a backend unary element-wise operation layer function
1082 *
1083 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
1084 * @tparam TargetInfo Target-specific information
1085 *
1086 * @param[in] node Node to create the backend function for
1087 *
1088 * @return Backend unary element-wise operation layer function
1089 */
1090template <typename UnaryEltwiseFunctions, typename TargetInfo>
1091std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
1092{
1093 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1094
1095 // Extract IO and info
1096 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1097 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1098 const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
1099
1100 ARM_COMPUTE_ERROR_ON(input == nullptr);
1101 ARM_COMPUTE_ERROR_ON(output == nullptr);
1102
1103 std::unique_ptr<IFunction> func = nullptr;
1104 std::string func_name;
1105 if(eltwise_op == UnaryEltwiseOperation::Exp)
1106 {
1107 std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
1108 std::string("Exp"),
1109 input, output);
1110 }
1111 else
1112 {
1113 ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
1114 }
1115
1116 // Log info
1117 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1118 << node.name()
1119 << " Type: " << node.type()
1120 << " Target: " << TargetInfo::TargetType
1121 << " Operation: " << func_name
1122 << " Data Type: " << input->info()->data_type()
1123 << " Shape: " << input->info()->tensor_shape()
1124 << std::endl);
1125
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001126 return std::move(func);
Sheri Zhang16dddd22020-05-27 15:03:48 +01001127}
1128
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001129/** Create a backend flatten layer function
1130 *
1131 * @tparam FlattenLayerFunction Backend flatten function
1132 * @tparam TargetInfo Target-specific information
1133 *
1134 * @param[in] node Node to create the backend function for
1135 *
1136 * @return Backend flatten layer function
1137 */
1138template <typename FlattenLayerFunction, typename TargetInfo>
1139std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
1140{
1141 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1142
1143 // Extract IO and info
1144 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1145 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1146
Georgios Pinitase2220552018-07-20 13:23:44 +01001147 ARM_COMPUTE_ERROR_ON(input == nullptr);
1148 ARM_COMPUTE_ERROR_ON(output == nullptr);
1149
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001150 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001151 auto func = std::make_unique<FlattenLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001152 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001153
1154 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001155 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1156 << node.name()
1157 << " Type: " << node.type()
1158 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001159 << " Data Type: " << input->info()->data_type()
1160 << " Input shape: " << input->info()->tensor_shape()
1161 << " Output shape: " << output->info()->tensor_shape()
1162 << std::endl);
1163
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001164 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001165}
1166
1167/** Create a backend fully connected layer function
1168 *
1169 * @tparam FullyConnectedLayerFunction Backend fully-connected function
1170 * @tparam TargetInfo Target-specific information
1171 *
1172 * @param[in] node Node to create the backend function for
1173 * @param[in] ctx Graph context
1174 *
1175 * @return Backend fully connected layer function
1176 */
1177template <typename FullyConnectedLayerFunction, typename TargetInfo>
1178std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
1179{
1180 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1181
1182 // Extract IO and info
1183 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1184 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
1185 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
1186 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
cfRodf2c022e2021-11-05 11:29:53 +00001187 FullyConnectedLayerInfo fc_info = node.info();
1188 fc_info.enable_fast_math = (node.fast_math_hint() == FastMathHint::Enabled);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001189
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001190 ARM_COMPUTE_ERROR_ON(input == nullptr);
1191 ARM_COMPUTE_ERROR_ON(weights == nullptr);
1192 ARM_COMPUTE_ERROR_ON(output == nullptr);
1193
Georgios Pinitase2220552018-07-20 13:23:44 +01001194 // Create and configure function
Michalis Spyrou1a569a32019-09-10 17:20:34 +01001195 auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
1196 auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001197 auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
Georgios Pinitase2220552018-07-20 13:23:44 +01001198 func->configure(input, weights, biases, output, fc_info);
1199
Georgios Pinitasfd7e8532018-09-07 10:51:27 +01001200 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
1201
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001202 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +01001203 std::ostringstream qss;
1204 if(is_quantized)
1205 {
1206 qss << " Input QuantInfo: " << input->info()->quantization_info()
1207 << " Weights QuantInfo: " << weights->info()->quantization_info()
1208 << " Output QuantInfo: " << output->info()->quantization_info();
1209 }
Pablo Tello32521432018-11-15 14:43:10 +00001210 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1211 << node.name()
1212 << " Type: " << node.type()
1213 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001214 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +01001215 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001216 << " Input shape: " << input->info()->tensor_shape()
1217 << " Weights shape: " << weights->info()->tensor_shape()
1218 << " Output shape: " << output->info()->tensor_shape()
1219 << std::endl);
1220
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001221 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001222}
1223
Manuel Bottini5209be52019-02-13 16:34:56 +00001224/** Create a backend generate proposals layer function
1225 *
1226 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
1227 * @tparam TargetInfo Target-specific information
1228 *
1229 * @param[in] node Node to create the backend function for
1230 * @param[in] ctx Graph context
1231 *
1232 * @return Backend generate proposals layer function
1233 */
1234template <typename GenerateProposalsLayerFunction, typename TargetInfo>
1235std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
1236{
1237 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1238
1239 // Extract IO and info
1240 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
1241 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
1242 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
1243 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
1244 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
1245 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1246 const GenerateProposalsInfo info = node.info();
1247
1248 ARM_COMPUTE_ERROR_ON(scores == nullptr);
1249 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1250 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1251 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1252 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1253
1254 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001255 auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
Manuel Bottini5209be52019-02-13 16:34:56 +00001256 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1257
1258 // Log info
1259 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1260 << " Target " << TargetInfo::TargetType
1261 << " Data Type: " << scores->info()->data_type()
1262 << " Scores shape: " << scores->info()->tensor_shape()
1263 << " Deltas shape: " << deltas->info()->tensor_shape()
1264 << " Anchors shape: " << anchors->info()->tensor_shape()
1265 << " Proposals shape: " << proposals->info()->tensor_shape()
1266 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1267 << " Scores Out shape: " << scores_out->info()->tensor_shape()
1268 << std::endl);
1269
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001270 return std::move(func);
Manuel Bottini5209be52019-02-13 16:34:56 +00001271}
1272
/** Create a backend L2 normalization layer function
 *
 * @tparam L2NormalizeLayerFunction Backend L2 normalization function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend L2 normalization layer function
 */
template <typename L2NormalizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    int                              axis    = node.axis();
    float                            epsilon = node.epsilon();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    // The target's memory manager is forwarded so the function can register auxiliary memory with it
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
    func->configure(input, output, axis, epsilon);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Axis: " << axis
                               << " Epsilon: " << epsilon
                               << std::endl);

    // std::move converts unique_ptr<L2NormalizeLayerFunction> to the unique_ptr<IFunction> return type
    return std::move(func);
}
1316
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001317/** Create a backend normalization layer function
1318 *
1319 * @tparam NormalizationLayerFunction Backend normalization function
1320 * @tparam TargetInfo Target-specific information
1321 *
1322 * @param[in] node Node to create the backend function for
1323 * @param[in] ctx Graph context
1324 *
1325 * @return Backend normalization layer function
1326 */
1327template <typename NormalizationLayerFunction, typename TargetInfo>
1328std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
1329{
1330 ARM_COMPUTE_UNUSED(ctx);
1331
1332 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1333
1334 // Extract IO and info
1335 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1336 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1337 const NormalizationLayerInfo norm_info = node.normalization_info();
1338 ARM_COMPUTE_ERROR_ON(input == nullptr);
1339 ARM_COMPUTE_ERROR_ON(output == nullptr);
1340
1341 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001342 auto func = std::make_unique<NormalizationLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001343 func->configure(input, output, norm_info);
1344
1345 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001346 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1347 << node.name()
1348 << " Type: " << node.type()
1349 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001350 << " Data Type: " << input->info()->data_type()
1351 << " Input shape: " << input->info()->tensor_shape()
1352 << " Output shape: " << output->info()->tensor_shape()
1353 << " Normalization info: " << norm_info.type()
1354 << std::endl);
1355
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001356 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001357}
1358
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001359/** Create a backend normalize planar YUV layer function
1360 *
1361 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
1362 * @tparam TargetInfo Target-specific information
1363 *
1364 * @param[in] node Node to create the backend function for
1365 *
1366 * @return Backend normalize plnar YUV layer function
1367 */
1368template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
1369std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
1370{
1371 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1372
1373 // Extract IO and info
1374 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1375 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1376 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1377 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1378 ARM_COMPUTE_ERROR_ON(input == nullptr);
1379 ARM_COMPUTE_ERROR_ON(mean == nullptr);
1380 ARM_COMPUTE_ERROR_ON(std == nullptr);
1381 ARM_COMPUTE_ERROR_ON(output == nullptr);
1382
1383 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001384 auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001385 func->configure(input, output, mean, std);
1386
1387 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001388 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1389 << node.name()
1390 << " Type: " << node.type()
1391 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001392 << " Data Type: " << input->info()->data_type()
1393 << " Shape: " << input->info()->tensor_shape()
1394 << std::endl);
1395
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001396 return std::move(func);
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001397}
1398
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001399/** Create a backend pad layer function
1400 *
1401 * @tparam PadLayerFunction Backend pad function
1402 * @tparam TargetInfo Target-specific information
1403 *
1404 * @param[in] node Node to create the backend function for
1405 *
1406 * @return Backend pad layer function
1407 */
1408template <typename PadLayerFunction, typename TargetInfo>
1409std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1410{
1411 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1412
1413 // Extract IO and info
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001414 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1415 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1416 const PaddingList &padding = node.padding();
1417 const PixelValue pad_value = node.pad_value();
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001418 ARM_COMPUTE_ERROR_ON(input == nullptr);
1419 ARM_COMPUTE_ERROR_ON(output == nullptr);
1420
1421 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001422 auto func = std::make_unique<PadLayerFunction>();
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001423 func->configure(input, output, padding, pad_value);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001424
1425 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001426 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1427 << node.name()
1428 << " Type: " << node.type()
1429 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001430 << " Data Type: " << input->info()->data_type()
1431 << " Input shape: " << input->info()->tensor_shape()
1432 << " Output shape: " << output->info()->tensor_shape()
1433 << std::endl);
1434
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001435 return std::move(func);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001436}
1437
Georgios Pinitas57c48242018-08-02 13:41:49 +01001438/** Create a backend permute layer function
1439 *
1440 * @tparam PermuteLayerFunction Backend permute function
1441 * @tparam TargetInfo Target-specific information
1442 *
1443 * @param[in] node Node to create the backend function for
1444 *
1445 * @return Backend permute layer function
1446 */
1447template <typename PermuteLayerFunction, typename TargetInfo>
1448std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1449{
1450 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1451
1452 // Extract IO and info
1453 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1454 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1455 const PermutationVector &perm = node.permutation_vector();
1456 ARM_COMPUTE_ERROR_ON(input == nullptr);
1457 ARM_COMPUTE_ERROR_ON(output == nullptr);
1458
1459 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001460 auto func = std::make_unique<PermuteLayerFunction>();
Georgios Pinitas57c48242018-08-02 13:41:49 +01001461 func->configure(input, output, perm);
1462
1463 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001464 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1465 << node.name()
1466 << " Type: " << node.type()
1467 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001468 << " Data Type: " << input->info()->data_type()
1469 << " Input shape: " << input->info()->tensor_shape()
1470 << " Output shape: " << output->info()->tensor_shape()
1471 << " Permutation vector: " << perm
1472 << std::endl);
1473
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001474 return std::move(func);
Georgios Pinitas57c48242018-08-02 13:41:49 +01001475}
1476
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001477/** Create a backend pooling layer function
1478 *
1479 * @tparam PoolingLayerFunction Backend pooling function
1480 * @tparam TargetInfo Target-specific information
1481 *
1482 * @param[in] node Node to create the backend function for
1483 *
1484 * @return Backend pooling layer function
1485 */
1486template <typename PoolingLayerFunction, typename TargetInfo>
1487std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1488{
1489 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1490
1491 // Extract IO and info
1492 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1493 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1494 const PoolingLayerInfo pool_info = node.pooling_info();
1495 ARM_COMPUTE_ERROR_ON(input == nullptr);
1496 ARM_COMPUTE_ERROR_ON(output == nullptr);
1497
1498 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001499 auto func = std::make_unique<PoolingLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001500 func->configure(input, output, pool_info);
1501
1502 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001503 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1504 << node.name()
1505 << " Type: " << node.type()
1506 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001507 << " Data Type: " << input->info()->data_type()
1508 << " Input shape: " << input->info()->tensor_shape()
1509 << " Output shape: " << output->info()->tensor_shape()
Sang-Hoon Park0cb3da62020-01-15 12:39:56 +00001510 << " Pooling info: " << pool_info.pool_type
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001511 << std::endl);
1512
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001513 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001514}
1515
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001516/** Create a backend PRelu layer function
1517 *
1518 * @tparam PReluFunction Backend PRelu function
1519 * @tparam TargetInfo Target-specific information
1520 *
1521 * @param[in] node Node to create the backend function for
1522 *
1523 * @return Backend PRelu layer function
1524 */
1525template <typename PReluFunction, typename TargetInfo>
1526std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1527{
1528 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1529
1530 // Extract IO and info
1531 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1532 typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1533 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1534 ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1535 ARM_COMPUTE_ERROR_ON(output == nullptr);
1536
1537 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001538 auto func = std::make_unique<PReluFunction>();
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001539 func->configure(input, alpha, output);
1540
1541 // Log info
1542 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1543 << node.name()
1544 << " Type: " << node.type()
1545 << " Target: " << TargetInfo::TargetType
1546 << " Data Type: " << input->info()->data_type()
1547 << " Input shape: " << input->info()->tensor_shape()
1548 << " Output shape: " << output->info()->tensor_shape()
1549 << std::endl);
1550
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001551 return std::move(func);
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001552}
1553
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00001554/** Create a backend print layer function
1555 *
1556 * @tparam TargetInfo Target-specific information
1557 *
1558 * @param[in] node Node to create the backend function for
1559 *
1560 * @return Backend print layer function
1561 */
1562template <typename TargetInfo>
1563std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1564{
1565 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1566
1567 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1568 ARM_COMPUTE_ERROR_ON(input == nullptr);
1569 ARM_COMPUTE_UNUSED(input);
1570
1571 // Log info
1572 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1573 << node.name()
1574 << " Type: " << node.type()
1575 << " Target: " << TargetInfo::TargetType
1576 << " Data Type: " << input->info()->data_type()
1577 << " Input shape: " << input->info()->tensor_shape()
1578 << std::endl);
1579
1580 return nullptr;
1581}
1582
Pablo Tello32521432018-11-15 14:43:10 +00001583/** Create a backend priorbox layer function
1584 *
1585 * @tparam PriorBoxLayerFunction Backend priorbox function
1586 * @tparam TargetInfo Target-specific information
1587 *
1588 * @param[in] node Node to create the backend function for
1589 *
1590 * @return Backend priorbox layer function
1591 */
1592template <typename PriorBoxLayerFunction, typename TargetInfo>
1593std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1594{
1595 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1596
1597 // Extract IO and info
1598 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1599 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1600 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1601 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1602 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1603 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1604 ARM_COMPUTE_ERROR_ON(output == nullptr);
1605
1606 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001607 auto func = std::make_unique<PriorBoxLayerFunction>();
Pablo Tello32521432018-11-15 14:43:10 +00001608 func->configure(input0, input1, output, prior_info);
1609
1610 // Log info
1611 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1612 << node.name()
1613 << " Type: " << node.type()
1614 << " Target: " << TargetInfo::TargetType
1615 << " Data Type: " << input0->info()->data_type()
1616 << " Input0 shape: " << input0->info()->tensor_shape()
1617 << " Input1 shape: " << input1->info()->tensor_shape()
1618 << " Output shape: " << output->info()->tensor_shape()
1619 << " PriorBoxLayer info: " << prior_info
1620 << std::endl);
1621
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001622 return std::move(func);
Pablo Tello32521432018-11-15 14:43:10 +00001623}
1624
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001625/** Create a backend quantization layer function
1626 *
1627 * @tparam QuantizationLayerFunction Backend quantization function
1628 * @tparam TargetInfo Target-specific information
1629 *
1630 * @param[in] node Node to create the backend function for
1631 *
1632 * @return Backend quantization layer function
1633 */
1634template <typename QuantizationLayerFunction, typename TargetInfo>
1635std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1636{
1637 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1638
1639 // Extract IO and info
1640 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1641 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1642 ARM_COMPUTE_ERROR_ON(input == nullptr);
1643 ARM_COMPUTE_ERROR_ON(output == nullptr);
1644
1645 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001646 auto func = std::make_unique<QuantizationLayerFunction>();
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001647 func->configure(input, output);
1648
1649 // Log info
1650 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1651 << node.name()
1652 << " Type: " << node.type()
1653 << " Target: " << TargetInfo::TargetType
1654 << " Data Type: " << input->info()->data_type()
1655 << " Input shape: " << input->info()->tensor_shape()
1656 << " Output shape: " << output->info()->tensor_shape()
1657 << std::endl);
1658
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001659 return std::move(func);
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001660}
1661
thecha01d64444b2020-09-07 14:50:21 +01001662/** Create a backend reduction operation layer function
1663 *
1664 * @tparam ReductionOperationFunction Backend reduction operation function
1665 * @tparam TargetInfo Target-specific information
1666 *
1667 * @param[in] node Node to create the backend function for
1668 * @param[in] ctx Graph context
1669 *
1670 * @return Backend reduction sum layer function
1671 */
1672template <typename ReductionOperationFunction, typename TargetInfo>
1673std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
1674{
1675 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1676
1677 // Extract IO and info
1678 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1679 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1680 ReductionOperation op = node.op();
1681 int axis = node.axis();
1682 bool keep_dims = node.keep_dims();
1683 ARM_COMPUTE_ERROR_ON(input == nullptr);
1684 ARM_COMPUTE_ERROR_ON(output == nullptr);
1685
1686 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001687 auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
thecha01d64444b2020-09-07 14:50:21 +01001688 func->configure(input, output, axis, op, keep_dims);
1689
1690 // Log info
1691 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1692 << node.name()
1693 << " Type: " << node.type()
1694 << " Target: " << TargetInfo::TargetType
1695 << " Data Type: " << input->info()->data_type()
1696 << " Input shape: " << input->info()->tensor_shape()
1697 << " Output shape: " << output->info()->tensor_shape()
1698 << " Operation: " << op
1699 << " Axis: " << axis
1700 << " Keep dimensions:" << keep_dims
1701 << std::endl);
1702
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001703 return std::move(func);
thecha01d64444b2020-09-07 14:50:21 +01001704}
1705
/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = std::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    // std::move converts unique_ptr<ReorgLayerFunction> to the unique_ptr<IFunction> return type
    return std::move(func);
}
1742
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001743/** Create a backend reshape layer function
1744 *
1745 * @tparam ReshapeLayerFunction Backend reshape function
1746 * @tparam TargetInfo Target-specific information
1747 *
1748 * @param[in] node Node to create the backend function for
1749 *
1750 * @return Backend reshape layer function
1751 */
1752template <typename ReshapeLayerFunction, typename TargetInfo>
1753std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1754{
1755 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1756
1757 // Extract IO and info
1758 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1759 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1760 ARM_COMPUTE_ERROR_ON(input == nullptr);
1761 ARM_COMPUTE_ERROR_ON(output == nullptr);
1762
1763 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001764 auto func = std::make_unique<ReshapeLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001765 func->configure(input, output);
1766
1767 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001768 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1769 << node.name()
1770 << " Type: " << node.type()
1771 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001772 << " Data Type: " << input->info()->data_type()
1773 << " Input shape: " << input->info()->tensor_shape()
1774 << " Output shape: " << output->info()->tensor_shape()
1775 << std::endl);
1776
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001777 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001778}
1779
1780/** Create a backend resize layer function
1781 *
1782 * @tparam ResizeLayerFunction Backend resize function
1783 * @tparam TargetInfo Target-specific information
1784 *
1785 * @param[in] node Node to create the backend function for
1786 *
1787 * @return Backend resize layer function
1788 */
1789template <typename ResizeLayerFunction, typename TargetInfo>
1790std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1791{
1792 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1793
1794 // Extract IO and info
1795 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1796 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1797 ARM_COMPUTE_ERROR_ON(input == nullptr);
1798 ARM_COMPUTE_ERROR_ON(output == nullptr);
1799 const InterpolationPolicy policy = node.policy();
1800
1801 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001802 auto func = std::make_unique<ResizeLayerFunction>();
Georgios Pinitasc53266e2020-12-09 03:11:53 +00001803 func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001804
1805 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001806 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1807 << node.name()
1808 << " Type: " << node.type()
1809 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001810 << " Data Type: " << input->info()->data_type()
1811 << " Input shape: " << input->info()->tensor_shape()
1812 << " Output shape: " << output->info()->tensor_shape()
1813 << " Interpolation: " << policy
1814 << std::endl);
1815
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001816 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001817}
1818
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001819/** Create a backend ROI align layer function
1820 *
1821 * @tparam ROIAlignLayerFunction ROI Align function
1822 * @tparam TargetInfo Target-specific information
1823 *
1824 * @param[in] node Node to create the backend function for
1825 *
1826 * @return ROI Align layer function
1827 */
1828template <typename ROIAlignLayerFunction, typename TargetInfo>
1829std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1830{
1831 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1832
1833 // Extract IO and info
1834 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1835 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1836 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1837 ARM_COMPUTE_ERROR_ON(input == nullptr);
1838 ARM_COMPUTE_ERROR_ON(output == nullptr);
1839 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1840
1841 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1842
1843 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001844 auto func = std::make_unique<ROIAlignLayerFunction>();
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001845
1846 func->configure(input, rois, output, pool_info);
1847
1848 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001849 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1850 << node.name()
1851 << " Type: " << node.type()
1852 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001853 << " Data Type: " << input->info()->data_type()
1854 << " Input shape: " << input->info()->tensor_shape()
1855 << " Output shape: " << output->info()->tensor_shape()
1856 << " ROIs shape: " << rois->info()->tensor_shape()
1857 << " ROIPooling width: " << pool_info.pooled_width()
1858 << " ROIPooling height: " << pool_info.pooled_height()
1859 << std::endl);
1860
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001861 return std::move(func);
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001862}
1863
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001864/** Create a backend slice layer function
1865 *
1866 * @tparam SliceLayerFunction Backend slice function
1867 * @tparam TargetInfo Target-specific information
1868 *
1869 * @param[in] node Node to create the backend function for
1870 *
1871 * @return Backend slice layer function
1872 */
1873template <typename SliceLayerFunction, typename TargetInfo>
1874std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1875{
1876 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1877
1878 // Extract IO and info
1879 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1880 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1881 ARM_COMPUTE_ERROR_ON(input == nullptr);
1882 ARM_COMPUTE_ERROR_ON(output == nullptr);
1883
1884 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001885 auto func = std::make_unique<SliceLayerFunction>();
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001886 func->configure(input, output, node.starts(), node.ends());
1887
1888 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001889 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1890 << node.name()
1891 << " Type: " << node.type()
1892 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001893 << " Data Type: " << input->info()->data_type()
1894 << " Input shape: " << input->info()->tensor_shape()
1895 << " Output shape: " << output->info()->tensor_shape()
1896 << std::endl);
1897
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001898 return std::move(func);
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001899}
1900
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001901/** Create a backend softmax layer function
1902 *
1903 * @tparam SoftmaxLayerFunction Backend softmax function
1904 * @tparam TargetInfo Target-specific information
1905 *
1906 * @param[in] node Node to create the backend function for
1907 * @param[in] ctx Graph context
1908 *
1909 * @return Backend softmax layer function
1910 */
1911template <typename SoftmaxLayerFunction, typename TargetInfo>
1912std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1913{
1914 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1915
1916 // Extract IO and info
1917 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1918 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1919 const float beta = node.beta();
1920 ARM_COMPUTE_ERROR_ON(input == nullptr);
1921 ARM_COMPUTE_ERROR_ON(output == nullptr);
1922
1923 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001924 auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001925 func->configure(input, output, beta);
1926
1927 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001928 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1929 << node.name()
1930 << " Type: " << node.type()
1931 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001932 << " Data Type: " << input->info()->data_type()
1933 << " Input shape: " << input->info()->tensor_shape()
1934 << " Output shape: " << output->info()->tensor_shape()
1935 << std::endl);
1936
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001937 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001938}
Michele Di Giorgioec699752019-03-22 15:25:32 +00001939
1940/** Create a backend layer stack function
1941 *
1942 * @tparam StackLayerFunction Backend stack function
1943 * @tparam TargetInfo Target-specific information
1944 *
1945 * @param[in] node Node to create the backend function for
1946 *
1947 * @return Backend stack layer function
1948 */
1949template <typename StackLayerFunction, typename TargetInfo>
1950std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1951{
1952 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1953 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1954
1955 // Extract IO and info
1956 std::vector<typename TargetInfo::TensorType *> inputs;
1957 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1958 {
1959 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1960 }
1961 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1962 const int axis = node.axis();
1963
1964 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001965 auto func = std::make_unique<StackLayerFunction>();
Michele Di Giorgioec699752019-03-22 15:25:32 +00001966 func->configure(inputs, axis, output);
1967
1968 // Log info
1969 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1970 << node.name()
1971 << " Type: " << node.type()
1972 << " Target: " << TargetInfo::TargetType
1973 << " Data Type: " << output->info()->data_type()
1974 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1975 << " Output shape: " << output->info()->tensor_shape()
1976 << " Num Inputs: " << inputs.size()
1977 << " Axis: " << axis
1978 << std::endl);
1979
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001980 return std::move(func);
Michele Di Giorgioec699752019-03-22 15:25:32 +00001981}
thecha012bfadd92020-08-12 17:25:51 +01001982
1983/** Create a backend slice layer function
1984 *
1985 * @tparam StridedSliceLayerFunction Backend strided slice function
1986 * @tparam TargetInfo Target-specific information
1987 *
1988 * @param[in] node Node to create the backend function for
1989 *
1990 * @return Backend strided slice layer function
1991 */
1992template <typename StridedSliceLayerFunction, typename TargetInfo>
1993std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
1994{
1995 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1996
1997 // Extract IO and info
1998 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1999 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
2000 Coordinates starts = node.starts();
2001 Coordinates ends = node.ends();
2002 BiStrides strides = node.strides();
2003 StridedSliceLayerInfo info = node.strided_slice_info();
2004
2005 ARM_COMPUTE_ERROR_ON(input == nullptr);
2006 ARM_COMPUTE_ERROR_ON(output == nullptr);
2007
2008 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00002009 auto func = std::make_unique<StridedSliceLayerFunction>();
thecha012bfadd92020-08-12 17:25:51 +01002010 func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
2011
2012 // Log info
2013 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
2014 << node.name()
2015 << " Type: " << node.type()
2016 << " Target: " << TargetInfo::TargetType
2017 << " Data Type: " << input->info()->data_type()
2018 << " Input shape: " << input->info()->tensor_shape()
2019 << " Output shape: " << output->info()->tensor_shape()
2020 << std::endl);
2021
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01002022 return std::move(func);
thecha012bfadd92020-08-12 17:25:51 +01002023}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01002024} // namespace detail
2025} // namespace backends
2026} // namespace graph
2027} // namespace arm_compute
2028
Michalis Spyrouf4643372019-11-29 16:17:13 +00002029#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */