/*
 * Copyright (c) 2018-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/utils/misc/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
// Address rule DR-9R5 (1579. Return by converting move constructor)
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
#define RETURN_UNIQUE_PTR(x) (x)
#else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
#define RETURN_UNIQUE_PTR(x) (std::move(x))
#endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */

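/* RETURN_UNIQUE_PTR only papers over the pre-GCC-5 handling of C++ core issue 1579
 * (return by converting move constructor) referenced in the comment above: older compilers
 * need an explicit std::move when a named std::unique_ptr<Derived> is returned as
 * std::unique_ptr<IFunction>. A minimal usage sketch, assuming a hypothetical backend
 * function type (illustrative only, not part of this header's API):
 *
 * @code
 * std::unique_ptr<IFunction> make_example_function()
 * {
 *     auto func = support::cpp14::make_unique<ExampleBackendFunction>(); // hypothetical type
 *     func->configure();
 *     return RETURN_UNIQUE_PTR(func); // expands to (func) on clang/GCC>=5, std::move(func) otherwise
 * }
 * @endcode
 */
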
/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}

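/* Every helper below is parameterised on a TargetInfo type supplied by the backend. A minimal
 * sketch of the contract these helpers rely on (illustrative only; real backends define their
 * own TargetInfo structs with their concrete tensor types):
 *
 * @code
 * struct ExampleTargetInfo
 * {
 *     using TensorType = arm_compute::ITensor; // backing tensor type returned by get_backing_tensor
 *     static Target TargetType;                // graph Target enum value handled by this backend
 * };
 * @endcode
 */
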
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = support::cpp14::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

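/* Each create_*_layer helper is intended to be instantiated by a backend's function factory
 * with that backend's concrete function and TargetInfo types. A hedged sketch of such a call
 * site (ExampleActivationLayer and ExampleTargetInfo are stand-ins for whatever the backend
 * provides):
 *
 * @code
 * // inside a backend function factory's switch over node->type():
 * case NodeType::ActivationLayer:
 *     return detail::create_activation_layer<ExampleActivationLayer, ExampleTargetInfo>(
 *         *arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node));
 * @endcode
 */
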
/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      epsilon   = node.epsilon();
    const ActivationLayerInfo        fused_act = node.fused_activation();

    // Create and configure function
    auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

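/* ConvolutionLayerFunctions is expected to be a traits bundle naming one backend function per
 * convolution method dispatched on above. A sketch of its shape; the member types on the right
 * are placeholders, not library classes:
 *
 * @code
 * struct ExampleConvolutionLayerFunctions
 * {
 *     using GenericConvolutionLayer  = ExampleGenericConvolution;  // fallback for any other method
 *     using GEMMConvolutionLayer     = ExampleGEMMConvolution;     // ConvolutionMethod::GEMM
 *     using DirectConvolutionLayer   = ExampleDirectConvolution;   // ConvolutionMethod::Direct
 *     using WinogradConvolutionLayer = ExampleWinogradConvolution; // ConvolutionMethod::Winograd
 * };
 * @endcode
 */
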
/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
                                      std::string(), mm,
                                      input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend layer depth-wise convolution function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0       = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1       = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2       = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output0      = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *output1      = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *output2      = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType *output3      = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    const ActivationLayerInfo        act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

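/* EltwiseFunctions follows the same traits-bundle pattern: one backend function per supported
 * element-wise operation dispatched on above. A sketch with placeholder member types:
 *
 * @code
 * struct ExampleEltwiseFunctions
 * {
 *     using Addition       = ExampleArithmeticAddition;      // EltwiseOperation::Add
 *     using Subtraction    = ExampleArithmeticSubtraction;   // EltwiseOperation::Sub
 *     using Multiplication = ExamplePixelWiseMultiplication; // EltwiseOperation::Mul
 * };
 * @endcode
 */
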
/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend pad layer function
 *
 * @tparam PadLayerFunction Backend pad function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pad layer function
 */
template <typename PadLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PaddingList               &padding   = node.padding();
    const PixelValue                 pad_value = node.pad_value();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PadLayerFunction>();
    func->configure(input, output, padding, pad_value);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

Georgios Pinitas57c48242018-08-02 13:41:49 +01001083/** Create a backend permute layer function
1084 *
1085 * @tparam PermuteLayerFunction Backend permute function
1086 * @tparam TargetInfo Target-specific information
1087 *
1088 * @param[in] node Node to create the backend function for
1089 *
1090 * @return Backend permute layer function
1091 */
1092template <typename PermuteLayerFunction, typename TargetInfo>
1093std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1094{
1095 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1096
1097 // Extract IO and info
1098 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1099 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1100 const PermutationVector &perm = node.permutation_vector();
1101 ARM_COMPUTE_ERROR_ON(input == nullptr);
1102 ARM_COMPUTE_ERROR_ON(output == nullptr);
1103
1104 // Create and configure function
1105 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1106 func->configure(input, output, perm);
1107
1108 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001109 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1110 << node.name()
1111 << " Type: " << node.type()
1112 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001113 << " Data Type: " << input->info()->data_type()
1114 << " Input shape: " << input->info()->tensor_shape()
1115 << " Output shape: " << output->info()->tensor_shape()
1116 << " Permutation vector: " << perm
1117 << std::endl);
1118
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001119 return RETURN_UNIQUE_PTR(func);
Georgios Pinitas57c48242018-08-02 13:41:49 +01001120}

/** Create a backend pooling layer function
 *
 * @tparam PoolingLayerFunction Backend pooling function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pooling layer function
 */
template <typename PoolingLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PoolingLayerInfo           pool_info = node.pooling_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PoolingLayerFunction>();
    func->configure(input, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Pooling info: " << pool_info.pool_type
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend PRelu layer function
 *
 * @tparam PReluFunction Backend PRelu function
 * @tparam TargetInfo    Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend PRelu layer function
 */
template <typename PReluFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *alpha  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PReluFunction>();
    func->configure(input, alpha, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend print layer function
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend print layer function
 */
template <typename TargetInfo>
std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_UNUSED(input);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << std::endl);

    return nullptr;
}
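
// Note: unlike the other helpers in this file, the print-layer creator above does not build a
// backend function object; it only validates and logs the node and returns nullptr.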

/** Create a backend priorbox layer function
 *
 * @tparam PriorBoxLayerFunction Backend priorbox function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend priorbox layer function
 */
template <typename PriorBoxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1     = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const PriorBoxLayerInfo          prior_info = node.priorbox_info();
    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
    func->configure(input0, input1, output, prior_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " PriorBoxLayer info: " << prior_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend quantization layer function
 *
 * @tparam QuantizationLayerFunction Backend quantization function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend quantization layer function
 */
template <typename QuantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend reshape layer function
 *
 * @tparam ReshapeLayerFunction Backend reshape function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reshape layer function
 */
template <typename ReshapeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend resize layer function
 *
 * @tparam ResizeLayerFunction Backend resize function
 * @tparam TargetInfo          Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend resize layer function
 */
template <typename ResizeLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    const InterpolationPolicy policy = node.policy();

    // Create and configure function
    auto func = support::cpp14::make_unique<ResizeLayerFunction>();
    func->configure(input, output, policy, BorderMode::CONSTANT);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Interpolation: " << policy
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
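
// Note: the resize helper above hard-codes the border mode to BorderMode::CONSTANT; only the
// interpolation policy is taken from the node.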

/** Create a backend ROI align layer function
 *
 * @tparam ROIAlignLayerFunction ROI Align function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return ROI Align layer function
 */
template <typename ROIAlignLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *rois   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);
    ARM_COMPUTE_ERROR_ON(rois == nullptr);

    const ROIPoolingLayerInfo pool_info = node.pooling_info();

    // Create and configure function
    auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();

    func->configure(input, rois, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " ROIs shape: " << rois->info()->tensor_shape()
                               << " ROIPooling width: " << pool_info.pooled_width()
                               << " ROIPooling height: " << pool_info.pooled_height()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend slice layer function
 *
 * @tparam SliceLayerFunction Backend slice function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend slice layer function
 */
template <typename SliceLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<SliceLayerFunction>();
    func->configure(input, output, node.starts(), node.ends());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend softmax layer function
 *
 * @tparam SoftmaxLayerFunction Backend softmax function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend softmax layer function
 */
template <typename SoftmaxLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      beta   = node.beta();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, output, beta);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
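
// Softmax is one of the helpers that takes the GraphContext alongside the node, so the backend
// function can be constructed with the context's memory manager (see get_memory_manager above).
// A hedged sketch of a dispatch call, with illustrative backend names not taken from this file:
//
//   case NodeType::SoftmaxLayer:
//       return detail::create_softmax_layer<NESoftmaxLayer, NETargetInfo>(
//           *polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);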

/** Create a backend stack layer function
 *
 * @tparam StackLayerFunction Backend stack function
 * @tparam TargetInfo         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend stack layer function
 */
template <typename StackLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const int                        axis   = node.axis();

    // Create and configure function
    auto func = support::cpp14::make_unique<StackLayerFunction>();
    func->configure(inputs, axis, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Inputs shape: " << inputs[0]->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << axis
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend Upsample layer function
 *
 * @tparam UpsampleLayerFunction Backend Upsample function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend Upsample layer function
 */
template <typename UpsampleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input             = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output            = get_backing_tensor<TargetInfo>(node.output(0));
    const Size2D                     info              = node.info();
    const InterpolationPolicy        upsampling_policy = node.upsampling_policy();
    ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
    ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
    func->configure(input, output, info, upsampling_policy);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Strides: " << info
                               << " Upsampling policy: " << upsampling_policy
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
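
// Note: as the assertions above enforce, this upsample creator only accepts nearest-neighbour
// interpolation with a 2x2 stride; other configurations trip the error checks when asserts are
// enabled.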

/** Create a backend YOLO layer function
 *
 * @tparam YOLOlayerFunction Backend YOLO function
 * @tparam TargetInfo        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend YOLO layer function
 */
template <typename YOLOlayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input       = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info    = node.activation_info();
    const int32_t                    num_classes = node.num_classes();
    ARM_COMPUTE_ERROR_ON(num_classes <= 0);
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<YOLOlayerFunction>();
    func->configure(input, output, act_info, num_classes);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " Num classes: " << num_classes
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
} // namespace detail
} // namespace backends
} // namespace graph
} // namespace arm_compute

#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */