blob: 803283e20dfedcd842e6224b008c53624e11233a [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
Alessandro Navone6413e492021-02-02 11:39:05 +00002 * Copyright (c) 2018-2021 Arm Limited.
Georgios Pinitasda2491f2018-06-01 17:49:09 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
25#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
Georgios Pinitasda2491f2018-06-01 17:49:09 +010026
SiCongLi31778612021-11-12 17:33:45 +000027#include "arm_compute/core/experimental/IPostOp.h"
28#include "arm_compute/core/experimental/PostOps.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010029#include "arm_compute/graph/Logger.h"
30#include "arm_compute/graph/Tensor.h"
31#include "arm_compute/graph/TypePrinter.h"
32#include "arm_compute/graph/Types.h"
Georgios Pinitas9e4824c2019-04-12 13:15:58 +010033#include "arm_compute/graph/Utils.h"
giuros01acce5042019-02-21 17:32:34 +000034#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
ramelg01b75d6242021-11-26 19:12:40 +000035#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationWithPostOpsFunction.h"
Manuel Bottinibffb41e2019-06-20 16:00:27 +010036#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010037#include "arm_compute/graph/backends/Utils.h"
38#include "arm_compute/graph/nodes/Nodes.h"
39
40#include "arm_compute/core/Error.h"
41#include "arm_compute/core/Helpers.h"
42#include "arm_compute/core/ITensorInfo.h"
Sang-Hoon Park68dd25f2020-10-19 16:00:11 +010043#include "support/Cast.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010044
45namespace arm_compute
46{
47namespace graph
48{
49namespace backends
50{
51namespace detail
52{
53/** Returns backing tensor of a given tensor
54 *
55 * @tparam TargetInfo Target information
56 *
57 * @param[in] tensor Tensor to extract the backing tensor from
58 *
59 * @return Backing tensor if present else nullptr
60 */
61template <typename TargetInfo>
62typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
63{
64 typename TargetInfo::TensorType *backing_tensor = nullptr;
65 if(tensor != nullptr)
66 {
67 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
68 // Get backing tensor handle
69 ITensorHandle *tensor_handle = tensor->handle();
70 // Get backing tensor
71 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
72 }
73
74 return backing_tensor;
75}
76
/** Validates a node before a backend function is created for it
 *
 * Asserts that the node is assigned to the expected backend target and that
 * its input/output arity matches what the backend function expects.
 * The ARM_COMPUTE_UNUSED at the end suggests the checks and logging may be
 * compiled out in some build configurations — TODO confirm.
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node                 Node to validate
 * @param[in] num_expected_inputs  Expected number of node inputs
 * @param[in] num_expected_outputs Expected number of node outputs
 */
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    // Silence unused-parameter warnings when the asserts/logging above are disabled
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}
91
/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = std::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace : " << is_in_place_operation(input, output)
                               << std::endl);

    // std::move is kept for the unique_ptr<Derived> -> unique_ptr<IFunction> conversion
    return std::move(func);
}
129
/** Creates a backend argminmax layer function
 *
 * @tparam ArgMinMaxLayerFunction Backend argminmax function
 * @tparam TargetInfo             Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend argminmax layer function
 */
template <typename ArgMinMaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_arg_min_max_layer(ArgMinMaxLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const ReductionOperation         op     = node.reduction_operation();
    unsigned int                     axis   = node.axis();

    // Create function
    auto func = std::make_unique<ArgMinMaxLayerFunction>();
    func->configure(input, axis, output, op);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Reduction Operation: " << op
                               << " axis: " << axis
                               << std::endl);

    return std::move(func);
}
166
/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    // Node inputs are ordered as: data, mean, variance, beta, gamma
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      epsilon   = node.epsilon();
    const ActivationLayerInfo        fused_act = node.fused_activation();

    // Create and configure function
    auto func = std::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}
210
/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    // Node inputs are ordered as: data, weights, biases, BN mean, BN variance, BN beta, BN gamma
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
267
/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    // Node inputs are ordered as: data, weights, biases, BN mean, BN variance, BN beta, BN gamma
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
323
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100324/** Create a backend bounding box transform layer function
325 *
326 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
327 * @tparam TargetInfo Target-specific information
328 *
329 * @param[in] node Node to create the backend function for
330 *
331 * @return Backend bounding box transform layer function
332 */
333template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
334std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
335{
336 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
337
338 // Extract IO and info
339 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
340 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
341 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
342 const BoundingBoxTransformInfo bbox_info = node.info();
343
344 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000345 auto func = std::make_unique<BoundingBoxTransformLayerFunction>();
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100346 func->configure(input, output, deltas, bbox_info);
347
348 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000349 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
350 << node.name()
351 << " Type: " << node.type()
352 << " Target: " << TargetInfo::TargetType
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100353 << " Data Type: " << input->info()->data_type()
354 << " Shape: " << input->info()->tensor_shape()
355 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
356 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
357 << std::endl);
358
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100359 return std::move(func);
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100360}
361
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100362/** Create a backend channel shuffle layer function
363 *
364 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
365 * @tparam TargetInfo Target-specific information
366 *
367 * @param[in] node Node to create the backend function for
368 *
369 * @return Backend channel shuffle layer function
370 */
371template <typename ChannelShuffleLayerFunction, typename TargetInfo>
372std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
373{
374 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
375
376 // Extract IO and info
377 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
378 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
379 const unsigned int num_groups = node.num_groups();
380
381 // Create function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000382 auto func = std::make_unique<ChannelShuffleLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100383 func->configure(input, output, num_groups);
384
Pablo Tello32521432018-11-15 14:43:10 +0000385 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
386 << node.name()
387 << " Type: " << node.type()
388 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100389 << " Data Type: " << input->info()->data_type()
390 << " Shape: " << input->info()->tensor_shape()
391 << " Num groups: " << num_groups
392 << std::endl);
393
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100394 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100395}
396
/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function, or nullptr if the node is disabled
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::SrcTensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    // Map the node's logical concatenation axis to a dimension index in the output's data layout
    const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = std::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info (include quantization info only for quantized outputs)
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return std::move(func);
}
452
/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Quantized convolutions expect their biases in S32
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    // Dispatch on the requested convolution algorithm; Winograd and Direct do not support grouping
    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        // Fallback: generic convolution, which may select a method internally
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info (include quantization info only for quantized inputs)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return std::move(func);
}
543
/** Create a backend convolution layer function with post operator
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_with_post_op(FusedConvolutionWithPostOpNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 4 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Quantized convolutions expect their biases in S32
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const ActivationLayerInfo fused_act  = node.fused_activation();

    experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;

    // Translate the node's post-op descriptors into backend post-ops
    auto &post_op_info_list = node.post_op_info_list();
    for(const auto &post_op_info : post_op_info_list)
    {
        switch(post_op_info->type())
        {
            case PostOpType::Activation:
            {
                const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
                break;
            }
            case PostOpType::Eltwise_Add:
            {
                // Input 3 is the extra tensor added element-wise after the convolution
                typename TargetInfo::TensorType *add_input    = get_backing_tensor<TargetInfo>(node.input(3));
                const auto                       eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
                post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
                break;
            }
            default:
            {
                ARM_COMPUTE_ERROR("Unsupported PostOpType");
            }
        }
    }

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    // Fuse convolution with post ops is only supported for conv1x1, which is only implemented as gemmconv2d
    std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                    std::string("GEMMConvolutionLayer"), mm,
                                    input, weights, biases, output, conv_info,
                                    WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups, post_ops);

    // Log info (include quantization info only for quantized inputs)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << " Post ops" << post_ops
                               << std::endl);
    return std::move(func);
}
637
638/** Create a backend convolution batch normalization layer function with post operator
639 *
640 * @tparam FusedLayerTypes Backend convolution functions
641 * @tparam TargetInfo Target-specific information
642 *
643 * @param[in] node Node to create the backend function for
644 * @param[in] ctx Graph context
645 *
646 * @return Backend fused convolution with batch normalization layer function
647 */
648template <typename FusedLayerTypes, typename TargetInfo>
649std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_with_post_op(FusedConvolutionBatchNormalizationWithPostOpsNode &node, GraphContext &ctx)
650{
651 validate_node<TargetInfo>(node, 8 /* expected inputs */, 1 /* expected outputs */);
652
653 // Extract IO and info
654 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
655 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
656 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
657 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
658 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
659 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
660 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
661
662 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
663
664 const PadStrideInfo conv_info = node.convolution_info();
665 const unsigned int num_groups = node.num_groups();
666 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
667 const float epsilon = node.epsilon();
668
669 experimental::PostOpList<typename TargetInfo::TensorType *> post_ops;
670
671 auto &post_op_info_list = node.post_op_info_list();
672 for(const auto &post_op_info : post_op_info_list)
673 {
674 switch(post_op_info->type())
675 {
676 case PostOpType::Activation:
677 {
678 const auto act_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoActivation *>(post_op_info.get());
679 post_ops.template push_back_op<experimental::PostOpAct<typename TargetInfo::TensorType *>>(act_info->_act);
680 break;
681 }
682 case PostOpType::Eltwise_Add:
683 {
684 typename TargetInfo::TensorType *add_input = get_backing_tensor<TargetInfo>(node.input(3));
685 const auto eltwise_info = utils::cast::polymorphic_downcast<const ConvPostOpInfoEltwiseAdd *>(post_op_info.get());
686 post_ops.template push_back_op<experimental::PostOpEltwiseAdd<typename TargetInfo::TensorType *>>(add_input, eltwise_info->_prev_op_dst_pos, eltwise_info->_policy);
687 break;
688 }
689 default:
690 {
691 ARM_COMPUTE_ERROR("Unsupported PostOpType");
692 }
693 }
694 }
695
696 // Create and configure function (we assume that functions have been validated before creation)
697 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
698 std::unique_ptr<IFunction> func;
699 std::string func_name;
700
701 using FType = FusedConvolutionBatchNormalizationWithPostOpsFunction<TargetInfo, FusedLayerTypes>;
702
703 // Create and configure function
704 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
705 std::string("FusedConvolutionBatchNormalizationLayerWithPostOpsLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, post_ops);
706
707 // Log info
708 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
709 << node.name()
710 << " Type: " << node.type()
711 << " Target: " << TargetInfo::TargetType
712 << " Data Type: " << input->info()->data_type()
713 << " Input shape: " << input->info()->tensor_shape()
714 << " Weights shape: " << weights->info()->tensor_shape()
715 << " Output shape: " << output->info()->tensor_shape()
SiCongLid9176142021-12-15 15:38:00 +0000716 << " Post Ops:" << post_ops
Sheri Zhangfb228032021-11-02 10:45:07 +0000717 << std::endl);
718 return std::move(func);
719}
720
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100721/** Create a backend deconvolution layer function
722 *
723 * @tparam DeconvolutionLayerFunction Backend deconvolution function
724 * @tparam TargetInfo Target-specific information
725 *
726 * @param[in] node Node to create the backend function for
727 * @param[in] ctx Graph context
728 *
729 * @return Backend deconvolution layer function
730 */
731template <typename DeconvolutionLayerFunction, typename TargetInfo>
732std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
733{
734 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
735
736 // Extract IO and info
737 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
738 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
739 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
740 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
741
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100742 const PadStrideInfo deconv_info = node.deconvolution_info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100743
744 // Create and configure function (we assume that functions have been validated before creation)
745 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
746 std::unique_ptr<IFunction> func;
747
748 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
749 std::string(), mm,
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100750 input, weights, biases, output, deconv_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100751
752 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000753 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
754 << node.name()
755 << " Type: " << node.type()
756 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100757 << " Data Type: " << input->info()->data_type()
758 << " Input shape: " << input->info()->tensor_shape()
759 << " Weights shape: " << weights->info()->tensor_shape()
760 << " Output shape: " << output->info()->tensor_shape()
761 << std::endl);
762 return func;
763}
764
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100765/** Create a backend layer depth-wise convolution function
766 *
767 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
768 * @tparam TargetInfo Target-specific information
769 *
770 * @param[in] node Node to create the backend function for
771 *
772 * @return Backend depth-wise convolution layer function
773 */
Manuel Bottini05069f02019-09-26 17:18:26 +0100774template <typename DepthwiseConvolutionLayer, typename TargetInfo>
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100775std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
776{
777 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
778
779 // Extract IO and info
780 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
781 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
782 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
783 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
784
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100785 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
786
787 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100788 {
789 biases->info()->set_data_type(DataType::S32);
790 }
791
Manuel Bottini05069f02019-09-26 17:18:26 +0100792 const PadStrideInfo conv_info = node.convolution_info();
793 const unsigned int depth_multiplier = node.depth_multiplier();
794 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100795
796 // Create and configure function (we assume that functions have been validated before creation)
797 std::unique_ptr<IFunction> func;
798 std::string func_name;
Manuel Bottini05069f02019-09-26 17:18:26 +0100799
800 std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
801 std::string("DepthwiseConvolutionLayer"),
802 input, weights, biases, output, conv_info, depth_multiplier, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100803
804 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100805 std::ostringstream qss;
806 if(is_quantized)
807 {
808 qss << " Input QuantInfo: " << input->info()->quantization_info()
809 << " Weights QuantInfo: " << weights->info()->quantization_info()
810 << " Output QuantInfo: " << output->info()->quantization_info();
811 }
Pablo Tello32521432018-11-15 14:43:10 +0000812 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
813 << node.name()
814 << " Type: " << func_name
815 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100816 << " Data Type: " << input->info()->data_type()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100817 << " Input shape: " << input->info()->tensor_shape()
818 << " Weights shape: " << weights->info()->tensor_shape()
819 << " Output shape: " << output->info()->tensor_shape()
Georgios Pinitas05045c12018-12-07 18:31:47 +0000820 << " Depth multiplier: " << depth_multiplier
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000821 << qss.str()
Georgios Pinitas60e98252018-10-22 16:17:20 +0100822 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100823 << std::endl);
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100824 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100825}
826
thecha010a05e6a2020-08-28 18:40:38 +0100827/** Create a backend depth to space layer function
828 *
 * @tparam DepthToSpaceLayerFunction Backend depth to space function
830 * @tparam TargetInfo Target-specific information
831 *
832 * @param[in] node Node to create the backend function for
833 *
834 * @return Backend depth to space layer function
835 */
836template <typename DepthToSpaceLayerFunction, typename TargetInfo>
837std::unique_ptr<IFunction> create_depth_to_space_layer(DepthToSpaceLayerNode &node)
838{
839 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
840
841 // Extract IO and info
842 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
843 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
844
845 ARM_COMPUTE_ERROR_ON(input == nullptr);
846 ARM_COMPUTE_ERROR_ON(output == nullptr);
847
848 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000849 auto func = std::make_unique<DepthToSpaceLayerFunction>();
thecha010a05e6a2020-08-28 18:40:38 +0100850 func->configure(input, output, node.block_shape());
851
852 // Log info
853 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
854 << node.name()
855 << " Type: " << node.type()
856 << " Target: " << TargetInfo::TargetType
857 << " Data Type: " << input->info()->data_type()
858 << " Input shape: " << input->info()->tensor_shape()
859 << " Block Size: " << node.block_shape()
860 << " Output shape: " << output->info()->tensor_shape()
861 << std::endl);
862
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100863 return std::move(func);
thecha010a05e6a2020-08-28 18:40:38 +0100864}
865
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000866/** Create a backend dequantize layer function
867 *
 * @tparam DequantizationLayerFunction Backend dequantize function
869 * @tparam TargetInfo Target-specific information
870 *
871 * @param[in] node Node to create the backend function for
872 *
873 * @return Backend dequantize layer function
874 */
875template <typename DequantizationLayerFunction, typename TargetInfo>
876std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
877{
878 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
879
880 // Extract IO and info
881 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
882 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
883
884 ARM_COMPUTE_ERROR_ON(input == nullptr);
885 ARM_COMPUTE_ERROR_ON(output == nullptr);
886
887 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000888 auto func = std::make_unique<DequantizationLayerFunction>();
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000889 func->configure(input, output);
890
891 // Log info
892 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
893 << node.name()
894 << " Type: " << node.type()
895 << " Target: " << TargetInfo::TargetType
896 << " Data Type: " << input->info()->data_type()
897 << " Input shape: " << input->info()->tensor_shape()
898 << " Input quantization info: " << output->info()->quantization_info()
899 << " Output shape: " << output->info()->tensor_shape()
900 << std::endl);
901
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100902 return std::move(func);
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000903}
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000904/** Create a backend detection output layer function
905 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
907 * @tparam TargetInfo Target-specific information
908 *
909 * @param[in] node Node to create the backend function for
910 *
911 * @return Backend detection output layer function
912 */
913template <typename DetectionOutputLayerFunction, typename TargetInfo>
914std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
915{
916 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
917
918 // Extract IO and info
919 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
920 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
921 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
922 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
923 const DetectionOutputLayerInfo detect_info = node.detection_output_info();
924
925 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
926 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
927 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
928 ARM_COMPUTE_ERROR_ON(output == nullptr);
929
930 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000931 auto func = std::make_unique<DetectionOutputLayerFunction>();
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000932 func->configure(input0, input1, input2, output, detect_info);
933
934 // Log info
935 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
936 << node.name()
937 << " Type: " << node.type()
938 << " Target: " << TargetInfo::TargetType
939 << " Data Type: " << input0->info()->data_type()
940 << " Input0 shape: " << input0->info()->tensor_shape()
941 << " Input1 shape: " << input1->info()->tensor_shape()
942 << " Input2 shape: " << input2->info()->tensor_shape()
943 << " Output shape: " << output->info()->tensor_shape()
944 << " DetectionOutputLayer info: " << detect_info
945 << std::endl);
946
Georgios Pinitas4d9687e2020-10-21 18:33:36 +0100947 return std::move(func);
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000948}
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000949
950/** Create a backend detection post process layer function
951 *
952 * @tparam DetectionPostProcessLayerFunction Backend detection output function
953 * @tparam TargetInfo Target-specific information
954 *
955 * @param[in] node Node to create the backend function for
956 *
957 * @return Backend detection post process layer function
958 */
959template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
960std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
961{
962 validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
963
964 // Extract IO and info
965 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
966 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
967 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
968 typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
969 typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
970 typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
971 typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
972 const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
973
974 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
975 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
976 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
977 ARM_COMPUTE_ERROR_ON(output0 == nullptr);
978 ARM_COMPUTE_ERROR_ON(output1 == nullptr);
979 ARM_COMPUTE_ERROR_ON(output2 == nullptr);
980 ARM_COMPUTE_ERROR_ON(output3 == nullptr);
981
982 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +0000983 auto func = std::make_unique<DetectionPostProcessLayerFunction>();
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000984 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
985
986 // Log info
987 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
988 << node.name()
989 << " Type: " << node.type()
990 << " Target: " << TargetInfo::TargetType
991 << " Data Type: " << input0->info()->data_type()
992 << " Input0 shape: " << input0->info()->tensor_shape()
993 << " Input1 shape: " << input1->info()->tensor_shape()
994 << " Input2 shape: " << input2->info()->tensor_shape()
995 << " Output0 shape: " << output0->info()->tensor_shape()
996 << " Output1 shape: " << output1->info()->tensor_shape()
997 << " Output2 shape: " << output2->info()->tensor_shape()
998 << " Output3 shape: " << output3->info()->tensor_shape()
999 << " DetectionPostProcessLayer info: " << detect_info
1000 << std::endl);
1001
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001002 return std::move(func);
Isabella Gottardia7acb3c2019-01-08 13:48:44 +00001003}
1004
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001005/** Create a backend element-wise operation layer function
1006 *
1007 * @tparam EltwiseFunctions Backend element-wise function
1008 * @tparam TargetInfo Target-specific information
1009 *
1010 * @param[in] node Node to create the backend function for
1011 *
1012 * @return Backend element-wise operation layer function
1013 */
1014template <typename EltwiseFunctions, typename TargetInfo>
1015std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
1016{
1017 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1018
1019 // Extract IO and info
1020 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
1021 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
1022 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1023 const EltwiseOperation eltwise_op = node.eltwise_operation();
1024 const ConvertPolicy convert_policy = node.convert_policy();
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001025 const ActivationLayerInfo act_info = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001026 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1027 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
1028 ARM_COMPUTE_ERROR_ON(output == nullptr);
1029
1030 std::unique_ptr<IFunction> func = nullptr;
1031 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +01001032 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001033 {
1034 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
1035 std::string("ArithmeticAddition"),
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001036 input1, input2, output, convert_policy, act_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001037 }
Georgios Pinitase2220552018-07-20 13:23:44 +01001038 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001039 {
1040 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
1041 std::string("ArithmeticSubtraction"),
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001042 input1, input2, output, convert_policy, act_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001043 }
Georgios Pinitase2220552018-07-20 13:23:44 +01001044 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001045 {
1046 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
1047 std::string("PixelWiseMultiplication"),
Giorgio Arena8b2a7d32020-02-11 17:21:31 +00001048 input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001049 }
thecha01f8e35842020-07-28 17:28:17 +01001050 else if(eltwise_op == EltwiseOperation::Max)
1051 {
1052 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
1053 std::string("ElementwiseMaximum"),
1054 input1, input2, output, act_info);
1055 }
Alessandro Navone6413e492021-02-02 11:39:05 +00001056 else if(eltwise_op == EltwiseOperation::Div)
1057 {
1058 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Division>(
1059 std::string("ArithmeticDivision"),
1060 input1, input2, output, act_info);
1061 }
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001062 else
1063 {
1064 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
1065 }
1066
1067 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001068 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1069 << node.name()
1070 << " Type: " << node.type()
1071 << " Target: " << TargetInfo::TargetType
1072 << " Operation: " << func_name
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001073 << " Data Type: " << input1->info()->data_type()
Pablo Tello32521432018-11-15 14:43:10 +00001074 << " Shape: " << input1->info()->tensor_shape()
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001075 << std::endl);
1076
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001077 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001078}
1079
Sheri Zhang16dddd22020-05-27 15:03:48 +01001080/** Create a backend unary element-wise operation layer function
1081 *
1082 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
1083 * @tparam TargetInfo Target-specific information
1084 *
1085 * @param[in] node Node to create the backend function for
1086 *
1087 * @return Backend unary element-wise operation layer function
1088 */
1089template <typename UnaryEltwiseFunctions, typename TargetInfo>
1090std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
1091{
1092 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1093
1094 // Extract IO and info
1095 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1096 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1097 const UnaryEltwiseOperation eltwise_op = node.eltwise_descriptor().op;
1098
1099 ARM_COMPUTE_ERROR_ON(input == nullptr);
1100 ARM_COMPUTE_ERROR_ON(output == nullptr);
1101
1102 std::unique_ptr<IFunction> func = nullptr;
1103 std::string func_name;
1104 if(eltwise_op == UnaryEltwiseOperation::Exp)
1105 {
1106 std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
1107 std::string("Exp"),
1108 input, output);
1109 }
1110 else
1111 {
1112 ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
1113 }
1114
1115 // Log info
1116 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1117 << node.name()
1118 << " Type: " << node.type()
1119 << " Target: " << TargetInfo::TargetType
1120 << " Operation: " << func_name
1121 << " Data Type: " << input->info()->data_type()
1122 << " Shape: " << input->info()->tensor_shape()
1123 << std::endl);
1124
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001125 return std::move(func);
Sheri Zhang16dddd22020-05-27 15:03:48 +01001126}
1127
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001128/** Create a backend flatten layer function
1129 *
1130 * @tparam FlattenLayerFunction Backend flatten function
1131 * @tparam TargetInfo Target-specific information
1132 *
1133 * @param[in] node Node to create the backend function for
1134 *
1135 * @return Backend flatten layer function
1136 */
1137template <typename FlattenLayerFunction, typename TargetInfo>
1138std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
1139{
1140 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1141
1142 // Extract IO and info
1143 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1144 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1145
Georgios Pinitase2220552018-07-20 13:23:44 +01001146 ARM_COMPUTE_ERROR_ON(input == nullptr);
1147 ARM_COMPUTE_ERROR_ON(output == nullptr);
1148
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001149 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001150 auto func = std::make_unique<FlattenLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001151 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001152
1153 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001154 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1155 << node.name()
1156 << " Type: " << node.type()
1157 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001158 << " Data Type: " << input->info()->data_type()
1159 << " Input shape: " << input->info()->tensor_shape()
1160 << " Output shape: " << output->info()->tensor_shape()
1161 << std::endl);
1162
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001163 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001164}
1165
1166/** Create a backend fully connected layer function
1167 *
1168 * @tparam FullyConnectedLayerFunction Backend fully-connected function
1169 * @tparam TargetInfo Target-specific information
1170 *
1171 * @param[in] node Node to create the backend function for
1172 * @param[in] ctx Graph context
1173 *
1174 * @return Backend fully connected layer function
1175 */
1176template <typename FullyConnectedLayerFunction, typename TargetInfo>
1177std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
1178{
1179 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1180
1181 // Extract IO and info
1182 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1183 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
1184 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
1185 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
cfRodf2c022e2021-11-05 11:29:53 +00001186 FullyConnectedLayerInfo fc_info = node.info();
1187 fc_info.enable_fast_math = (node.fast_math_hint() == FastMathHint::Enabled);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001188
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001189 ARM_COMPUTE_ERROR_ON(input == nullptr);
1190 ARM_COMPUTE_ERROR_ON(weights == nullptr);
1191 ARM_COMPUTE_ERROR_ON(output == nullptr);
1192
Georgios Pinitase2220552018-07-20 13:23:44 +01001193 // Create and configure function
Michalis Spyrou1a569a32019-09-10 17:20:34 +01001194 auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
1195 auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001196 auto func = std::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
Georgios Pinitase2220552018-07-20 13:23:44 +01001197 func->configure(input, weights, biases, output, fc_info);
1198
Georgios Pinitasfd7e8532018-09-07 10:51:27 +01001199 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
1200
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001201 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +01001202 std::ostringstream qss;
1203 if(is_quantized)
1204 {
1205 qss << " Input QuantInfo: " << input->info()->quantization_info()
1206 << " Weights QuantInfo: " << weights->info()->quantization_info()
1207 << " Output QuantInfo: " << output->info()->quantization_info();
1208 }
Pablo Tello32521432018-11-15 14:43:10 +00001209 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1210 << node.name()
1211 << " Type: " << node.type()
1212 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001213 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +01001214 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001215 << " Input shape: " << input->info()->tensor_shape()
1216 << " Weights shape: " << weights->info()->tensor_shape()
1217 << " Output shape: " << output->info()->tensor_shape()
1218 << std::endl);
1219
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001220 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001221}
1222
Manuel Bottini5209be52019-02-13 16:34:56 +00001223/** Create a backend generate proposals layer function
1224 *
1225 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
1226 * @tparam TargetInfo Target-specific information
1227 *
1228 * @param[in] node Node to create the backend function for
1229 * @param[in] ctx Graph context
1230 *
1231 * @return Backend generate proposals layer function
1232 */
1233template <typename GenerateProposalsLayerFunction, typename TargetInfo>
1234std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
1235{
1236 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
1237
1238 // Extract IO and info
1239 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
1240 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
1241 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
1242 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
1243 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
1244 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
1245 const GenerateProposalsInfo info = node.info();
1246
1247 ARM_COMPUTE_ERROR_ON(scores == nullptr);
1248 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
1249 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
1250 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
1251 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
1252
1253 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001254 auto func = std::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
Manuel Bottini5209be52019-02-13 16:34:56 +00001255 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
1256
1257 // Log info
1258 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1259 << " Target " << TargetInfo::TargetType
1260 << " Data Type: " << scores->info()->data_type()
1261 << " Scores shape: " << scores->info()->tensor_shape()
1262 << " Deltas shape: " << deltas->info()->tensor_shape()
1263 << " Anchors shape: " << anchors->info()->tensor_shape()
1264 << " Proposals shape: " << proposals->info()->tensor_shape()
1265 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
1266 << " Scores Out shape: " << scores_out->info()->tensor_shape()
1267 << std::endl);
1268
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001269 return std::move(func);
Manuel Bottini5209be52019-02-13 16:34:56 +00001270}
1271
thecha013603aff2020-09-01 14:52:38 +01001272/** Create a backend l2 normalization layer function
1273 *
1274 * @tparam NormalizationLayerFunction Backend normalization function
1275 * @tparam TargetInfo Target-specific information
1276 *
1277 * @param[in] node Node to create the backend function for
1278 * @param[in] ctx Graph context
1279 *
1280 * @return Backend normalization layer function
1281 */
1282template <typename L2NormalizeLayerFunction, typename TargetInfo>
1283std::unique_ptr<IFunction> create_l2_normalize_layer(L2NormalizeLayerNode &node, GraphContext &ctx)
1284{
1285 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1286
1287 // Extract IO and info
1288 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1289 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1290 int axis = node.axis();
1291 float epsilon = node.epsilon();
1292
1293 ARM_COMPUTE_ERROR_ON(input == nullptr);
1294 ARM_COMPUTE_ERROR_ON(output == nullptr);
1295
1296 // Create and configure function
1297 auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001298 auto func = std::make_unique<L2NormalizeLayerFunction>(mm);
thecha013603aff2020-09-01 14:52:38 +01001299 func->configure(input, output, axis, epsilon);
1300
1301 // Log info
1302 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1303 << node.name()
1304 << " Type: " << node.type()
1305 << " Target: " << TargetInfo::TargetType
1306 << " Data Type: " << input->info()->data_type()
1307 << " Input shape: " << input->info()->tensor_shape()
1308 << " Output shape: " << output->info()->tensor_shape()
1309 << " Axis: " << axis
1310 << " Epsilon: " << epsilon
1311 << std::endl);
1312
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001313 return std::move(func);
thecha013603aff2020-09-01 14:52:38 +01001314}
1315
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001316/** Create a backend normalization layer function
1317 *
1318 * @tparam NormalizationLayerFunction Backend normalization function
1319 * @tparam TargetInfo Target-specific information
1320 *
1321 * @param[in] node Node to create the backend function for
1322 * @param[in] ctx Graph context
1323 *
1324 * @return Backend normalization layer function
1325 */
1326template <typename NormalizationLayerFunction, typename TargetInfo>
1327std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
1328{
1329 ARM_COMPUTE_UNUSED(ctx);
1330
1331 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1332
1333 // Extract IO and info
1334 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1335 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1336 const NormalizationLayerInfo norm_info = node.normalization_info();
1337 ARM_COMPUTE_ERROR_ON(input == nullptr);
1338 ARM_COMPUTE_ERROR_ON(output == nullptr);
1339
1340 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001341 auto func = std::make_unique<NormalizationLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001342 func->configure(input, output, norm_info);
1343
1344 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001345 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1346 << node.name()
1347 << " Type: " << node.type()
1348 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001349 << " Data Type: " << input->info()->data_type()
1350 << " Input shape: " << input->info()->tensor_shape()
1351 << " Output shape: " << output->info()->tensor_shape()
1352 << " Normalization info: " << norm_info.type()
1353 << std::endl);
1354
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001355 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001356}
1357
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001358/** Create a backend normalize planar YUV layer function
1359 *
1360 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
1361 * @tparam TargetInfo Target-specific information
1362 *
1363 * @param[in] node Node to create the backend function for
1364 *
1365 * @return Backend normalize plnar YUV layer function
1366 */
1367template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
1368std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
1369{
1370 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1371
1372 // Extract IO and info
1373 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1374 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1375 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1376 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1377 ARM_COMPUTE_ERROR_ON(input == nullptr);
1378 ARM_COMPUTE_ERROR_ON(mean == nullptr);
1379 ARM_COMPUTE_ERROR_ON(std == nullptr);
1380 ARM_COMPUTE_ERROR_ON(output == nullptr);
1381
1382 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001383 auto func = std::make_unique<NormalizePlanarYUVLayerFunction>();
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001384 func->configure(input, output, mean, std);
1385
1386 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001387 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1388 << node.name()
1389 << " Type: " << node.type()
1390 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001391 << " Data Type: " << input->info()->data_type()
1392 << " Shape: " << input->info()->tensor_shape()
1393 << std::endl);
1394
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001395 return std::move(func);
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001396}
1397
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001398/** Create a backend pad layer function
1399 *
1400 * @tparam PadLayerFunction Backend pad function
1401 * @tparam TargetInfo Target-specific information
1402 *
1403 * @param[in] node Node to create the backend function for
1404 *
1405 * @return Backend pad layer function
1406 */
1407template <typename PadLayerFunction, typename TargetInfo>
1408std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1409{
1410 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1411
1412 // Extract IO and info
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001413 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1414 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1415 const PaddingList &padding = node.padding();
1416 const PixelValue pad_value = node.pad_value();
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001417 ARM_COMPUTE_ERROR_ON(input == nullptr);
1418 ARM_COMPUTE_ERROR_ON(output == nullptr);
1419
1420 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001421 auto func = std::make_unique<PadLayerFunction>();
Georgios Pinitas102b0ce2020-02-13 17:59:09 +00001422 func->configure(input, output, padding, pad_value);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001423
1424 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001425 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1426 << node.name()
1427 << " Type: " << node.type()
1428 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001429 << " Data Type: " << input->info()->data_type()
1430 << " Input shape: " << input->info()->tensor_shape()
1431 << " Output shape: " << output->info()->tensor_shape()
1432 << std::endl);
1433
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001434 return std::move(func);
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001435}
1436
Georgios Pinitas57c48242018-08-02 13:41:49 +01001437/** Create a backend permute layer function
1438 *
1439 * @tparam PermuteLayerFunction Backend permute function
1440 * @tparam TargetInfo Target-specific information
1441 *
1442 * @param[in] node Node to create the backend function for
1443 *
1444 * @return Backend permute layer function
1445 */
1446template <typename PermuteLayerFunction, typename TargetInfo>
1447std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1448{
1449 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1450
1451 // Extract IO and info
1452 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1453 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1454 const PermutationVector &perm = node.permutation_vector();
1455 ARM_COMPUTE_ERROR_ON(input == nullptr);
1456 ARM_COMPUTE_ERROR_ON(output == nullptr);
1457
1458 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001459 auto func = std::make_unique<PermuteLayerFunction>();
Georgios Pinitas57c48242018-08-02 13:41:49 +01001460 func->configure(input, output, perm);
1461
1462 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001463 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1464 << node.name()
1465 << " Type: " << node.type()
1466 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001467 << " Data Type: " << input->info()->data_type()
1468 << " Input shape: " << input->info()->tensor_shape()
1469 << " Output shape: " << output->info()->tensor_shape()
1470 << " Permutation vector: " << perm
1471 << std::endl);
1472
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001473 return std::move(func);
Georgios Pinitas57c48242018-08-02 13:41:49 +01001474}
1475
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001476/** Create a backend pooling layer function
1477 *
1478 * @tparam PoolingLayerFunction Backend pooling function
1479 * @tparam TargetInfo Target-specific information
1480 *
1481 * @param[in] node Node to create the backend function for
1482 *
1483 * @return Backend pooling layer function
1484 */
1485template <typename PoolingLayerFunction, typename TargetInfo>
1486std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1487{
1488 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1489
1490 // Extract IO and info
1491 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1492 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1493 const PoolingLayerInfo pool_info = node.pooling_info();
1494 ARM_COMPUTE_ERROR_ON(input == nullptr);
1495 ARM_COMPUTE_ERROR_ON(output == nullptr);
1496
1497 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001498 auto func = std::make_unique<PoolingLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001499 func->configure(input, output, pool_info);
1500
1501 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001502 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1503 << node.name()
1504 << " Type: " << node.type()
1505 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001506 << " Data Type: " << input->info()->data_type()
1507 << " Input shape: " << input->info()->tensor_shape()
1508 << " Output shape: " << output->info()->tensor_shape()
Sang-Hoon Park0cb3da62020-01-15 12:39:56 +00001509 << " Pooling info: " << pool_info.pool_type
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001510 << std::endl);
1511
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001512 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001513}
1514
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001515/** Create a backend PRelu layer function
1516 *
1517 * @tparam PReluFunction Backend PRelu function
1518 * @tparam TargetInfo Target-specific information
1519 *
1520 * @param[in] node Node to create the backend function for
1521 *
1522 * @return Backend PRelu layer function
1523 */
1524template <typename PReluFunction, typename TargetInfo>
1525std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1526{
1527 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1528
1529 // Extract IO and info
1530 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1531 typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1532 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1533 ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1534 ARM_COMPUTE_ERROR_ON(output == nullptr);
1535
1536 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001537 auto func = std::make_unique<PReluFunction>();
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001538 func->configure(input, alpha, output);
1539
1540 // Log info
1541 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1542 << node.name()
1543 << " Type: " << node.type()
1544 << " Target: " << TargetInfo::TargetType
1545 << " Data Type: " << input->info()->data_type()
1546 << " Input shape: " << input->info()->tensor_shape()
1547 << " Output shape: " << output->info()->tensor_shape()
1548 << std::endl);
1549
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001550 return std::move(func);
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001551}
1552
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00001553/** Create a backend print layer function
1554 *
1555 * @tparam TargetInfo Target-specific information
1556 *
1557 * @param[in] node Node to create the backend function for
1558 *
1559 * @return Backend print layer function
1560 */
1561template <typename TargetInfo>
1562std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1563{
1564 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1565
1566 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1567 ARM_COMPUTE_ERROR_ON(input == nullptr);
1568 ARM_COMPUTE_UNUSED(input);
1569
1570 // Log info
1571 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1572 << node.name()
1573 << " Type: " << node.type()
1574 << " Target: " << TargetInfo::TargetType
1575 << " Data Type: " << input->info()->data_type()
1576 << " Input shape: " << input->info()->tensor_shape()
1577 << std::endl);
1578
1579 return nullptr;
1580}
1581
Pablo Tello32521432018-11-15 14:43:10 +00001582/** Create a backend priorbox layer function
1583 *
1584 * @tparam PriorBoxLayerFunction Backend priorbox function
1585 * @tparam TargetInfo Target-specific information
1586 *
1587 * @param[in] node Node to create the backend function for
1588 *
1589 * @return Backend priorbox layer function
1590 */
1591template <typename PriorBoxLayerFunction, typename TargetInfo>
1592std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1593{
1594 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1595
1596 // Extract IO and info
1597 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1598 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1599 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1600 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1601 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1602 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1603 ARM_COMPUTE_ERROR_ON(output == nullptr);
1604
1605 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001606 auto func = std::make_unique<PriorBoxLayerFunction>();
Pablo Tello32521432018-11-15 14:43:10 +00001607 func->configure(input0, input1, output, prior_info);
1608
1609 // Log info
1610 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1611 << node.name()
1612 << " Type: " << node.type()
1613 << " Target: " << TargetInfo::TargetType
1614 << " Data Type: " << input0->info()->data_type()
1615 << " Input0 shape: " << input0->info()->tensor_shape()
1616 << " Input1 shape: " << input1->info()->tensor_shape()
1617 << " Output shape: " << output->info()->tensor_shape()
1618 << " PriorBoxLayer info: " << prior_info
1619 << std::endl);
1620
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001621 return std::move(func);
Pablo Tello32521432018-11-15 14:43:10 +00001622}
1623
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001624/** Create a backend quantization layer function
1625 *
1626 * @tparam QuantizationLayerFunction Backend quantization function
1627 * @tparam TargetInfo Target-specific information
1628 *
1629 * @param[in] node Node to create the backend function for
1630 *
1631 * @return Backend quantization layer function
1632 */
1633template <typename QuantizationLayerFunction, typename TargetInfo>
1634std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1635{
1636 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1637
1638 // Extract IO and info
1639 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1640 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1641 ARM_COMPUTE_ERROR_ON(input == nullptr);
1642 ARM_COMPUTE_ERROR_ON(output == nullptr);
1643
1644 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001645 auto func = std::make_unique<QuantizationLayerFunction>();
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001646 func->configure(input, output);
1647
1648 // Log info
1649 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1650 << node.name()
1651 << " Type: " << node.type()
1652 << " Target: " << TargetInfo::TargetType
1653 << " Data Type: " << input->info()->data_type()
1654 << " Input shape: " << input->info()->tensor_shape()
1655 << " Output shape: " << output->info()->tensor_shape()
1656 << std::endl);
1657
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001658 return std::move(func);
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001659}
1660
thecha01d64444b2020-09-07 14:50:21 +01001661/** Create a backend reduction operation layer function
1662 *
1663 * @tparam ReductionOperationFunction Backend reduction operation function
1664 * @tparam TargetInfo Target-specific information
1665 *
1666 * @param[in] node Node to create the backend function for
1667 * @param[in] ctx Graph context
1668 *
1669 * @return Backend reduction sum layer function
1670 */
1671template <typename ReductionOperationFunction, typename TargetInfo>
1672std::unique_ptr<IFunction> create_reduction_operation_layer(ReductionLayerNode &node, GraphContext &ctx)
1673{
1674 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1675
1676 // Extract IO and info
1677 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1678 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1679 ReductionOperation op = node.op();
1680 int axis = node.axis();
1681 bool keep_dims = node.keep_dims();
1682 ARM_COMPUTE_ERROR_ON(input == nullptr);
1683 ARM_COMPUTE_ERROR_ON(output == nullptr);
1684
1685 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001686 auto func = std::make_unique<ReductionOperationFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
thecha01d64444b2020-09-07 14:50:21 +01001687 func->configure(input, output, axis, op, keep_dims);
1688
1689 // Log info
1690 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1691 << node.name()
1692 << " Type: " << node.type()
1693 << " Target: " << TargetInfo::TargetType
1694 << " Data Type: " << input->info()->data_type()
1695 << " Input shape: " << input->info()->tensor_shape()
1696 << " Output shape: " << output->info()->tensor_shape()
1697 << " Operation: " << op
1698 << " Axis: " << axis
1699 << " Keep dimensions:" << keep_dims
1700 << std::endl);
1701
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001702 return std::move(func);
thecha01d64444b2020-09-07 14:50:21 +01001703}
1704
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001705/** Create a backend reorg layer function
1706 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001707 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001708 * @tparam TargetInfo Target-specific information
1709 *
1710 * @param[in] node Node to create the backend function for
1711 *
1712 * @return Backend reshape layer function
1713 */
1714template <typename ReorgLayerFunction, typename TargetInfo>
1715std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1716{
1717 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1718
1719 // Extract IO and info
1720 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1721 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1722 ARM_COMPUTE_ERROR_ON(input == nullptr);
1723 ARM_COMPUTE_ERROR_ON(output == nullptr);
1724
1725 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001726 auto func = std::make_unique<ReorgLayerFunction>();
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001727 func->configure(input, output, node.stride());
1728
1729 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001730 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1731 << node.name()
1732 << " Type: " << node.type()
1733 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001734 << " Data Type: " << input->info()->data_type()
1735 << " Input shape: " << input->info()->tensor_shape()
1736 << " Output shape: " << output->info()->tensor_shape()
1737 << std::endl);
1738
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001739 return std::move(func);
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001740}
1741
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001742/** Create a backend reshape layer function
1743 *
1744 * @tparam ReshapeLayerFunction Backend reshape function
1745 * @tparam TargetInfo Target-specific information
1746 *
1747 * @param[in] node Node to create the backend function for
1748 *
1749 * @return Backend reshape layer function
1750 */
1751template <typename ReshapeLayerFunction, typename TargetInfo>
1752std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1753{
1754 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1755
1756 // Extract IO and info
1757 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1758 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1759 ARM_COMPUTE_ERROR_ON(input == nullptr);
1760 ARM_COMPUTE_ERROR_ON(output == nullptr);
1761
1762 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001763 auto func = std::make_unique<ReshapeLayerFunction>();
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001764 func->configure(input, output);
1765
1766 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001767 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1768 << node.name()
1769 << " Type: " << node.type()
1770 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001771 << " Data Type: " << input->info()->data_type()
1772 << " Input shape: " << input->info()->tensor_shape()
1773 << " Output shape: " << output->info()->tensor_shape()
1774 << std::endl);
1775
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001776 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001777}
1778
1779/** Create a backend resize layer function
1780 *
1781 * @tparam ResizeLayerFunction Backend resize function
1782 * @tparam TargetInfo Target-specific information
1783 *
1784 * @param[in] node Node to create the backend function for
1785 *
1786 * @return Backend resize layer function
1787 */
1788template <typename ResizeLayerFunction, typename TargetInfo>
1789std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1790{
1791 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1792
1793 // Extract IO and info
1794 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1795 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1796 ARM_COMPUTE_ERROR_ON(input == nullptr);
1797 ARM_COMPUTE_ERROR_ON(output == nullptr);
1798 const InterpolationPolicy policy = node.policy();
1799
1800 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001801 auto func = std::make_unique<ResizeLayerFunction>();
Georgios Pinitasc53266e2020-12-09 03:11:53 +00001802 func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT, PixelValue(), SamplingPolicy::CENTER, false, false });
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001803
1804 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001805 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1806 << node.name()
1807 << " Type: " << node.type()
1808 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001809 << " Data Type: " << input->info()->data_type()
1810 << " Input shape: " << input->info()->tensor_shape()
1811 << " Output shape: " << output->info()->tensor_shape()
1812 << " Interpolation: " << policy
1813 << std::endl);
1814
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001815 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001816}
1817
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001818/** Create a backend ROI align layer function
1819 *
1820 * @tparam ROIAlignLayerFunction ROI Align function
1821 * @tparam TargetInfo Target-specific information
1822 *
1823 * @param[in] node Node to create the backend function for
1824 *
1825 * @return ROI Align layer function
1826 */
1827template <typename ROIAlignLayerFunction, typename TargetInfo>
1828std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1829{
1830 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1831
1832 // Extract IO and info
1833 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1834 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1835 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1836 ARM_COMPUTE_ERROR_ON(input == nullptr);
1837 ARM_COMPUTE_ERROR_ON(output == nullptr);
1838 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1839
1840 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1841
1842 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001843 auto func = std::make_unique<ROIAlignLayerFunction>();
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001844
1845 func->configure(input, rois, output, pool_info);
1846
1847 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001848 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1849 << node.name()
1850 << " Type: " << node.type()
1851 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001852 << " Data Type: " << input->info()->data_type()
1853 << " Input shape: " << input->info()->tensor_shape()
1854 << " Output shape: " << output->info()->tensor_shape()
1855 << " ROIs shape: " << rois->info()->tensor_shape()
1856 << " ROIPooling width: " << pool_info.pooled_width()
1857 << " ROIPooling height: " << pool_info.pooled_height()
1858 << std::endl);
1859
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001860 return std::move(func);
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001861}
1862
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001863/** Create a backend slice layer function
1864 *
1865 * @tparam SliceLayerFunction Backend slice function
1866 * @tparam TargetInfo Target-specific information
1867 *
1868 * @param[in] node Node to create the backend function for
1869 *
1870 * @return Backend slice layer function
1871 */
1872template <typename SliceLayerFunction, typename TargetInfo>
1873std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1874{
1875 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1876
1877 // Extract IO and info
1878 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1879 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1880 ARM_COMPUTE_ERROR_ON(input == nullptr);
1881 ARM_COMPUTE_ERROR_ON(output == nullptr);
1882
1883 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001884 auto func = std::make_unique<SliceLayerFunction>();
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001885 func->configure(input, output, node.starts(), node.ends());
1886
1887 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001888 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1889 << node.name()
1890 << " Type: " << node.type()
1891 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001892 << " Data Type: " << input->info()->data_type()
1893 << " Input shape: " << input->info()->tensor_shape()
1894 << " Output shape: " << output->info()->tensor_shape()
1895 << std::endl);
1896
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001897 return std::move(func);
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001898}
1899
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001900/** Create a backend softmax layer function
1901 *
1902 * @tparam SoftmaxLayerFunction Backend softmax function
1903 * @tparam TargetInfo Target-specific information
1904 *
1905 * @param[in] node Node to create the backend function for
1906 * @param[in] ctx Graph context
1907 *
1908 * @return Backend softmax layer function
1909 */
1910template <typename SoftmaxLayerFunction, typename TargetInfo>
1911std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1912{
1913 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1914
1915 // Extract IO and info
1916 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1917 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1918 const float beta = node.beta();
1919 ARM_COMPUTE_ERROR_ON(input == nullptr);
1920 ARM_COMPUTE_ERROR_ON(output == nullptr);
1921
1922 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001923 auto func = std::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001924 func->configure(input, output, beta);
1925
1926 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001927 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1928 << node.name()
1929 << " Type: " << node.type()
1930 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001931 << " Data Type: " << input->info()->data_type()
1932 << " Input shape: " << input->info()->tensor_shape()
1933 << " Output shape: " << output->info()->tensor_shape()
1934 << std::endl);
1935
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001936 return std::move(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001937}
Michele Di Giorgioec699752019-03-22 15:25:32 +00001938
1939/** Create a backend layer stack function
1940 *
1941 * @tparam StackLayerFunction Backend stack function
1942 * @tparam TargetInfo Target-specific information
1943 *
1944 * @param[in] node Node to create the backend function for
1945 *
1946 * @return Backend stack layer function
1947 */
1948template <typename StackLayerFunction, typename TargetInfo>
1949std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1950{
1951 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1952 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1953
1954 // Extract IO and info
1955 std::vector<typename TargetInfo::TensorType *> inputs;
1956 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1957 {
1958 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1959 }
1960 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1961 const int axis = node.axis();
1962
1963 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00001964 auto func = std::make_unique<StackLayerFunction>();
Michele Di Giorgioec699752019-03-22 15:25:32 +00001965 func->configure(inputs, axis, output);
1966
1967 // Log info
1968 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1969 << node.name()
1970 << " Type: " << node.type()
1971 << " Target: " << TargetInfo::TargetType
1972 << " Data Type: " << output->info()->data_type()
1973 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1974 << " Output shape: " << output->info()->tensor_shape()
1975 << " Num Inputs: " << inputs.size()
1976 << " Axis: " << axis
1977 << std::endl);
1978
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01001979 return std::move(func);
Michele Di Giorgioec699752019-03-22 15:25:32 +00001980}
thecha012bfadd92020-08-12 17:25:51 +01001981
1982/** Create a backend slice layer function
1983 *
1984 * @tparam StridedSliceLayerFunction Backend strided slice function
1985 * @tparam TargetInfo Target-specific information
1986 *
1987 * @param[in] node Node to create the backend function for
1988 *
1989 * @return Backend strided slice layer function
1990 */
1991template <typename StridedSliceLayerFunction, typename TargetInfo>
1992std::unique_ptr<IFunction> create_strided_slice_layer(StridedSliceLayerNode &node)
1993{
1994 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1995
1996 // Extract IO and info
1997 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1998 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1999 Coordinates starts = node.starts();
2000 Coordinates ends = node.ends();
2001 BiStrides strides = node.strides();
2002 StridedSliceLayerInfo info = node.strided_slice_info();
2003
2004 ARM_COMPUTE_ERROR_ON(input == nullptr);
2005 ARM_COMPUTE_ERROR_ON(output == nullptr);
2006
2007 // Create and configure function
Georgios Pinitas40f51a62020-11-21 03:04:18 +00002008 auto func = std::make_unique<StridedSliceLayerFunction>();
thecha012bfadd92020-08-12 17:25:51 +01002009 func->configure(input, output, starts, ends, strides, info.begin_mask(), info.end_mask(), info.shrink_axis_mask());
2010
2011 // Log info
2012 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
2013 << node.name()
2014 << " Type: " << node.type()
2015 << " Target: " << TargetInfo::TargetType
2016 << " Data Type: " << input->info()->data_type()
2017 << " Input shape: " << input->info()->tensor_shape()
2018 << " Output shape: " << output->info()->tensor_shape()
2019 << std::endl);
2020
Georgios Pinitas4d9687e2020-10-21 18:33:36 +01002021 return std::move(func);
thecha012bfadd92020-08-12 17:25:51 +01002022}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01002023} // namespace detail
2024} // namespace backends
2025} // namespace graph
2026} // namespace arm_compute
2027
Michalis Spyrouf4643372019-11-29 16:17:13 +00002028#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */