/*
 * Copyright (c) 2018-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/utils/misc/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
// Address rule DR-9R5 (1579. Return by converting move constructor)
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
#define RETURN_UNIQUE_PTR(x) (x)
#else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
#define RETURN_UNIQUE_PTR(x) (std::move(x))
#endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */

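// Illustrative note: the helpers below build a concrete backend function but return it as
// std::unique_ptr<IFunction>. Compilers that do not implement the resolution above need an
// explicit std::move for that converting return, which is what the macro hides. A minimal
// sketch (SomeBackendFunction is a hypothetical function type):
//
//   std::unique_ptr<IFunction> make_func()
//   {
//       auto func = support::cpp14::make_unique<SomeBackendFunction>();
//       return RETURN_UNIQUE_PTR(func); // (func) on new compilers, std::move(func) on old GCC
//   }
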
/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}

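// Usage sketch: each backend supplies a TargetInfo trait (backend tensor type plus target enum)
// and calls these helpers from its function factory, along these lines (names illustrative):
//
//   auto *input  = get_backing_tensor<NETargetInfo>(node.input(0));
//   auto *output = get_backing_tensor<NETargetInfo>(node.output(0));
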
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << " Name: " << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = support::cpp14::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

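// Dispatch sketch: a backend function factory typically switches on node.type() and forwards to
// the helper above with its own function/TargetInfo pair, e.g. (fragment, types illustrative):
//
//   case NodeType::ActivationLayer:
//       return detail::create_activation_layer<CLActivationLayer, CLTargetInfo>(
//           *arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node));
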
/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta   = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma  = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const float               epsilon   = node.epsilon();
    const ActivationLayerInfo fused_act = node.fused_activation();

    // Create and configure function
    auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
                                    std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::TensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
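        // Quantized (asymmetric) convolutions accumulate into 32-bit integers, so the bias
        // tensor is re-interpreted as S32 before being handed to the backend function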
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

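// The ConvolutionLayerFunctions template argument is a small trait bundle that maps the four
// convolution methods above onto concrete backend functions. A backend would provide something
// along these lines (illustrative sketch, OpenCL-flavoured names):
//
//   struct CLConvolutionLayerFunctions
//   {
//       using GenericConvolutionLayer  = CLConvolutionLayer;
//       using GEMMConvolutionLayer     = CLGEMMConvolutionLayer;
//       using DirectConvolutionLayer   = CLDirectConvolutionLayer;
//       using WinogradConvolutionLayer = CLWinogradConvolutionLayer;
//   };
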
/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
                                      std::string(), mm,
                                      input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend layer depth-wise convolution function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
                                    std::string("DepthwiseConvolutionLayer"),
                                    input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType     *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType     *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType     *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType     *output0     = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType     *output1     = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType     *output2     = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType     *output3     = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo  detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy());
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

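// As with the convolution helper, EltwiseFunctions is a trait bundle that names the backend's
// arithmetic functions. A plausible NEON-flavoured definition (illustrative sketch):
//
//   struct NEEltwiseFunctions
//   {
//       using Addition       = NEArithmeticAddition;
//       using Subtraction    = NEArithmeticSubtraction;
//       using Multiplication = NEPixelWiseMultiplication;
//   };
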
/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
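    // The function receives both a memory manager (for transient buffers) and a weights manager,
    // which is intended to let reshaped/transposed weights be cached and reused across functions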
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend pad layer function
 *
 * @tparam PadLayerFunction Backend pad function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pad layer function
 */
template <typename PadLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PaddingList               &padding   = node.padding();
    const PixelValue                 pad_value = node.pad_value();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PadLayerFunction>();
    func->configure(input, output, padding, pad_value);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

Georgios Pinitas57c48242018-08-02 13:41:49 +01001082/** Create a backend permute layer function
1083 *
1084 * @tparam PermuteLayerFunction Backend permute function
1085 * @tparam TargetInfo Target-specific information
1086 *
1087 * @param[in] node Node to create the backend function for
1088 *
1089 * @return Backend permute layer function
1090 */
1091template <typename PermuteLayerFunction, typename TargetInfo>
1092std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1093{
1094 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1095
1096 // Extract IO and info
1097 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1098 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1099 const PermutationVector &perm = node.permutation_vector();
1100 ARM_COMPUTE_ERROR_ON(input == nullptr);
1101 ARM_COMPUTE_ERROR_ON(output == nullptr);
1102
1103 // Create and configure function
1104 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1105 func->configure(input, output, perm);
1106
1107 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001108 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1109 << node.name()
1110 << " Type: " << node.type()
1111 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001112 << " Data Type: " << input->info()->data_type()
1113 << " Input shape: " << input->info()->tensor_shape()
1114 << " Output shape: " << output->info()->tensor_shape()
1115 << " Permutation vector: " << perm
1116 << std::endl);
1117
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001118 return RETURN_UNIQUE_PTR(func);
Georgios Pinitas57c48242018-08-02 13:41:49 +01001119}
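
// Illustrative example of the permutation vector: entry i names the input
// dimension that output dimension i is taken from. With that convention, the
// layout conversions used elsewhere in the library look roughly like:
//
//   const PermutationVector nchw_to_nhwc(2U, 0U, 1U); // (W,H,C) -> (C,W,H)
//   const PermutationVector nhwc_to_nchw(1U, 2U, 0U); // (C,W,H) -> (W,H,C)
//
// The vector applied here always comes from PermuteLayerNode::permutation_vector().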
1120
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001121/** Create a backend pooling layer function
1122 *
1123 * @tparam PoolingLayerFunction Backend pooling function
1124 * @tparam TargetInfo Target-specific information
1125 *
1126 * @param[in] node Node to create the backend function for
1127 *
1128 * @return Backend pooling layer function
1129 */
1130template <typename PoolingLayerFunction, typename TargetInfo>
1131std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1132{
1133 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1134
1135 // Extract IO and info
1136 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1137 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1138 const PoolingLayerInfo pool_info = node.pooling_info();
1139 ARM_COMPUTE_ERROR_ON(input == nullptr);
1140 ARM_COMPUTE_ERROR_ON(output == nullptr);
1141
1142 // Create and configure function
1143 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1144 func->configure(input, output, pool_info);
1145
1146 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001147 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1148 << node.name()
1149 << " Type: " << node.type()
1150 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001151 << " Data Type: " << input->info()->data_type()
1152 << " Input shape: " << input->info()->tensor_shape()
1153 << " Output shape: " << output->info()->tensor_shape()
Sang-Hoon Park0cb3da62020-01-15 12:39:56 +00001154 << " Pooling info: " << pool_info.pool_type
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001155 << std::endl);
1156
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001157 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001158}
1159
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001160/** Create a backend PRelu layer function
1161 *
1162 * @tparam PReluFunction Backend PRelu function
1163 * @tparam TargetInfo Target-specific information
1164 *
1165 * @param[in] node Node to create the backend function for
1166 *
1167 * @return Backend PRelu layer function
1168 */
1169template <typename PReluFunction, typename TargetInfo>
1170std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1171{
1172 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1173
1174 // Extract IO and info
1175 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1176 typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1177 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1178 ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1179 ARM_COMPUTE_ERROR_ON(output == nullptr);
1180
1181 // Create and configure function
1182 auto func = support::cpp14::make_unique<PReluFunction>();
1183 func->configure(input, alpha, output);
1184
1185 // Log info
1186 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1187 << node.name()
1188 << " Type: " << node.type()
1189 << " Target: " << TargetInfo::TargetType
1190 << " Data Type: " << input->info()->data_type()
1191 << " Input shape: " << input->info()->tensor_shape()
1192 << " Output shape: " << output->info()->tensor_shape()
1193 << std::endl);
1194
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001195 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001196}
1197
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00001198/** Create a backend print layer function
1199 *
1200 * @tparam TargetInfo Target-specific information
1201 *
1202 * @param[in] node Node to create the backend function for
1203 *
1204 * @return Backend print layer function (returns nullptr, as no backend function is instantiated)
1205 */
1206template <typename TargetInfo>
1207std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1208{
1209 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1210
1211 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1212 ARM_COMPUTE_ERROR_ON(input == nullptr);
1213 ARM_COMPUTE_UNUSED(input);
1214
1215 // Log info
1216 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1217 << node.name()
1218 << " Type: " << node.type()
1219 << " Target: " << TargetInfo::TargetType
1220 << " Data Type: " << input->info()->data_type()
1221 << " Input shape: " << input->info()->tensor_shape()
1222 << std::endl);
1223
1224 return nullptr;
1225}
1226
Pablo Tello32521432018-11-15 14:43:10 +00001227/** Create a backend priorbox layer function
1228 *
1229 * @tparam PriorBoxLayerFunction Backend priorbox function
1230 * @tparam TargetInfo Target-specific information
1231 *
1232 * @param[in] node Node to create the backend function for
1233 *
1234 * @return Backend priorbox layer function
1235 */
1236template <typename PriorBoxLayerFunction, typename TargetInfo>
1237std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1238{
1239 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1240
1241 // Extract IO and info
1242 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1243 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1244 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1245 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1246 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1247 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1248 ARM_COMPUTE_ERROR_ON(output == nullptr);
1249
1250 // Create and configure function
1251 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1252 func->configure(input0, input1, output, prior_info);
1253
1254 // Log info
1255 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1256 << node.name()
1257 << " Type: " << node.type()
1258 << " Target: " << TargetInfo::TargetType
1259 << " Data Type: " << input0->info()->data_type()
1260 << " Input0 shape: " << input0->info()->tensor_shape()
1261 << " Input1 shape: " << input1->info()->tensor_shape()
1262 << " Output shape: " << output->info()->tensor_shape()
1263 << " PriorBoxLayer info: " << prior_info
1264 << std::endl);
1265
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001266 return RETURN_UNIQUE_PTR(func);
Pablo Tello32521432018-11-15 14:43:10 +00001267}
1268
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001269/** Create a backend quantization layer function
1270 *
1271 * @tparam QuantizationLayerFunction Backend quantization function
1272 * @tparam TargetInfo Target-specific information
1273 *
1274 * @param[in] node Node to create the backend function for
1275 *
1276 * @return Backend quantization layer function
1277 */
1278template <typename QuantizationLayerFunction, typename TargetInfo>
1279std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1280{
1281 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1282
1283 // Extract IO and info
1284 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1285 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1286 ARM_COMPUTE_ERROR_ON(input == nullptr);
1287 ARM_COMPUTE_ERROR_ON(output == nullptr);
1288
1289 // Create and configure function
1290 auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1291 func->configure(input, output);
1292
1293 // Log info
1294 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1295 << node.name()
1296 << " Type: " << node.type()
1297 << " Target: " << TargetInfo::TargetType
1298 << " Data Type: " << input->info()->data_type()
1299 << " Input shape: " << input->info()->tensor_shape()
1300 << " Output shape: " << output->info()->tensor_shape()
1301 << std::endl);
1302
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001303 return RETURN_UNIQUE_PTR(func);
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001304}
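
// Note: configure() is not given explicit quantization parameters here; the
// backend quantization functions take the target scale and zero-point from
// the output tensor's info. A sketch of the kind of descriptor the graph is
// expected to attach to the output (values purely illustrative):
//
//   TensorInfo dst_info(TensorShape(224U, 224U, 3U), 1, DataType::QASYMM8,
//                       QuantizationInfo(0.05f, 128)); // scale, zero-point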
1305
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001306/** Create a backend reorg layer function
1307 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001308 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001309 * @tparam TargetInfo Target-specific information
1310 *
1311 * @param[in] node Node to create the backend function for
1312 *
1313 * @return Backend reorg layer function
1314 */
1315template <typename ReorgLayerFunction, typename TargetInfo>
1316std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1317{
1318 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1319
1320 // Extract IO and info
1321 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1322 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1323 ARM_COMPUTE_ERROR_ON(input == nullptr);
1324 ARM_COMPUTE_ERROR_ON(output == nullptr);
1325
1326 // Create and configure function
1327 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1328 func->configure(input, output, node.stride());
1329
1330 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001331 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1332 << node.name()
1333 << " Type: " << node.type()
1334 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001335 << " Data Type: " << input->info()->data_type()
1336 << " Input shape: " << input->info()->tensor_shape()
1337 << " Output shape: " << output->info()->tensor_shape()
1338 << std::endl);
1339
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001340 return RETURN_UNIQUE_PTR(func);
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001341}
1342
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001343/** Create a backend reshape layer function
1344 *
1345 * @tparam ReshapeLayerFunction Backend reshape function
1346 * @tparam TargetInfo Target-specific information
1347 *
1348 * @param[in] node Node to create the backend function for
1349 *
1350 * @return Backend reshape layer function
1351 */
1352template <typename ReshapeLayerFunction, typename TargetInfo>
1353std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1354{
1355 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1356
1357 // Extract IO and info
1358 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1359 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1360 ARM_COMPUTE_ERROR_ON(input == nullptr);
1361 ARM_COMPUTE_ERROR_ON(output == nullptr);
1362
1363 // Create and configure function
1364 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1365 func->configure(input, output);
1366
1367 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001368 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1369 << node.name()
1370 << " Type: " << node.type()
1371 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001372 << " Data Type: " << input->info()->data_type()
1373 << " Input shape: " << input->info()->tensor_shape()
1374 << " Output shape: " << output->info()->tensor_shape()
1375 << std::endl);
1376
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001377 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001378}
1379
1380/** Create a backend resize layer function
1381 *
1382 * @tparam ResizeLayerFunction Backend resize function
1383 * @tparam TargetInfo Target-specific information
1384 *
1385 * @param[in] node Node to create the backend function for
1386 *
1387 * @return Backend resize layer function
1388 */
1389template <typename ResizeLayerFunction, typename TargetInfo>
1390std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1391{
1392 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1393
1394 // Extract IO and info
1395 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1396 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1397 ARM_COMPUTE_ERROR_ON(input == nullptr);
1398 ARM_COMPUTE_ERROR_ON(output == nullptr);
1399 const InterpolationPolicy policy = node.policy();
1400
1401 // Create and configure function
1402 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1403 func->configure(input, output, policy, BorderMode::CONSTANT);
1404
1405 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001406 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1407 << node.name()
1408 << " Type: " << node.type()
1409 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001410 << " Data Type: " << input->info()->data_type()
1411 << " Input shape: " << input->info()->tensor_shape()
1412 << " Output shape: " << output->info()->tensor_shape()
1413 << " Interpolation: " << policy
1414 << std::endl);
1415
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001416 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001417}
1418
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001419/** Create a backend ROI align layer function
1420 *
1421 * @tparam ROIAlignLayerFunction ROI Align function
1422 * @tparam TargetInfo Target-specific information
1423 *
1424 * @param[in] node Node to create the backend function for
1425 *
1426 * @return ROI Align layer function
1427 */
1428template <typename ROIAlignLayerFunction, typename TargetInfo>
1429std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1430{
1431 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1432
1433 // Extract IO and info
1434 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1435 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1436 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1437 ARM_COMPUTE_ERROR_ON(input == nullptr);
1438 ARM_COMPUTE_ERROR_ON(output == nullptr);
1439 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1440
1441 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1442
1443 // Create and configure function
1444 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1445
1446 func->configure(input, rois, output, pool_info);
1447
1448 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001449 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1450 << node.name()
1451 << " Type: " << node.type()
1452 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001453 << " Data Type: " << input->info()->data_type()
1454 << " Input shape: " << input->info()->tensor_shape()
1455 << " Output shape: " << output->info()->tensor_shape()
1456 << " ROIs shape: " << rois->info()->tensor_shape()
1457 << " ROIPooling width: " << pool_info.pooled_width()
1458 << " ROIPooling height: " << pool_info.pooled_height()
1459 << std::endl);
1460
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001461 return RETURN_UNIQUE_PTR(func);
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001462}
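
// Sketch of the expected inputs (based on the backend ROIAlign interfaces,
// shown for illustration only): the second input is a [5, N] ROIs tensor in
// which each ROI is given as (batch_index, x1, y1, x2, y2), and the pooling
// geometry is described by ROIPoolingLayerInfo, e.g.:
//
//   const ROIPoolingLayerInfo pool_info(7U, 7U, 0.0625f); // pooled width, pooled height, spatial scale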
1463
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001464/** Create a backend slice layer function
1465 *
1466 * @tparam SliceLayerFunction Backend slice function
1467 * @tparam TargetInfo Target-specific information
1468 *
1469 * @param[in] node Node to create the backend function for
1470 *
1471 * @return Backend slice layer function
1472 */
1473template <typename SliceLayerFunction, typename TargetInfo>
1474std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1475{
1476 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1477
1478 // Extract IO and info
1479 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1480 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1481 ARM_COMPUTE_ERROR_ON(input == nullptr);
1482 ARM_COMPUTE_ERROR_ON(output == nullptr);
1483
1484 // Create and configure function
1485 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1486 func->configure(input, output, node.starts(), node.ends());
1487
1488 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001489 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1490 << node.name()
1491 << " Type: " << node.type()
1492 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001493 << " Data Type: " << input->info()->data_type()
1494 << " Input shape: " << input->info()->tensor_shape()
1495 << " Output shape: " << output->info()->tensor_shape()
1496 << std::endl);
1497
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001498 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001499}
1500
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001501/** Create a backend softmax layer function
1502 *
1503 * @tparam SoftmaxLayerFunction Backend softmax function
1504 * @tparam TargetInfo Target-specific information
1505 *
1506 * @param[in] node Node to create the backend function for
1507 * @param[in] ctx Graph context
1508 *
1509 * @return Backend softmax layer function
1510 */
1511template <typename SoftmaxLayerFunction, typename TargetInfo>
1512std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1513{
1514 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1515
1516 // Extract IO and info
1517 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1518 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1519 const float beta = node.beta();
1520 ARM_COMPUTE_ERROR_ON(input == nullptr);
1521 ARM_COMPUTE_ERROR_ON(output == nullptr);
1522
1523 // Create and configure function
1524 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1525 func->configure(input, output, beta);
1526
1527 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001528 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1529 << node.name()
1530 << " Type: " << node.type()
1531 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001532 << " Data Type: " << input->info()->data_type()
1533 << " Input shape: " << input->info()->tensor_shape()
1534 << " Output shape: " << output->info()->tensor_shape()
1535 << std::endl);
1536
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001537 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001538}
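
// Note on the beta parameter (standard definition, shown for reference): the
// backend softmax computes, per element,
//
//   y_i = exp(beta * (x_i - max(x))) / sum_j exp(beta * (x_j - max(x)))
//
// so beta == 1.0f is the usual softmax and larger values sharpen the output
// distribution. The memory manager fetched from the GraphContext lets the
// function's internal (intra-function) tensors be managed alongside the rest
// of the graph's memory.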
Michele Di Giorgioec699752019-03-22 15:25:32 +00001539
1540/** Create a backend stack layer function
1541 *
1542 * @tparam StackLayerFunction Backend stack function
1543 * @tparam TargetInfo Target-specific information
1544 *
1545 * @param[in] node Node to create the backend function for
1546 *
1547 * @return Backend stack layer function
1548 */
1549template <typename StackLayerFunction, typename TargetInfo>
1550std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1551{
1552 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1553 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1554
1555 // Extract IO and info
1556 std::vector<typename TargetInfo::TensorType *> inputs;
1557 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1558 {
1559 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1560 }
1561 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1562 const int axis = node.axis();
1563
1564 // Create and configure function
1565 auto func = support::cpp14::make_unique<StackLayerFunction>();
1566 func->configure(inputs, axis, output);
1567
1568 // Log info
1569 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1570 << node.name()
1571 << " Type: " << node.type()
1572 << " Target: " << TargetInfo::TargetType
1573 << " Data Type: " << output->info()->data_type()
1574 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1575 << " Output shape: " << output->info()->tensor_shape()
1576 << " Num Inputs: " << inputs.size()
1577 << " Axis: " << axis
1578 << std::endl);
1579
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001580 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgioec699752019-03-22 15:25:32 +00001581}
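
// Shape note (sketch of the usual stack semantics): a new dimension of size
// num_inputs is inserted at the given axis and the remaining dimensions shift
// up by one, so stacking four tensors of shape (64, 32) along axis 0 yields
// an output of shape (4, 64, 32).
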
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001582/** Create a backend Upsample layer function
1583 *
1584 * @tparam UpsampleLayerFunction Backend Upsample function
1585 * @tparam TargetInfo Target-specific information
1586 *
1587 * @param[in] node Node to create the backend function for
1588 * @param[in] ctx Graph context
1589 *
1590 * @return Backend Upsample layer function
1591 */
1592template <typename UpsampleLayerFunction, typename TargetInfo>
1593std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1594{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001595 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001596 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1597
1598 // Extract IO and info
1599 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1600 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1601 const Size2D info = node.info();
1602 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1603 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1604 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1605 ARM_COMPUTE_ERROR_ON(input == nullptr);
1606 ARM_COMPUTE_ERROR_ON(output == nullptr);
1607
1608 // Create and configure function
1609 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1610 func->configure(input, output, info, upsampling_policy);
1611
1612 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001613 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1614 << node.name()
1615 << " Type: " << node.type()
1616 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001617 << " Data Type: " << input->info()->data_type()
1618 << " Input shape: " << input->info()->tensor_shape()
1619 << " Output shape: " << output->info()->tensor_shape()
1620 << " Strides: " << info
1621 << " Upsampling policy: " << upsampling_policy
1622 << std::endl);
1623
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001624 return RETURN_UNIQUE_PTR(func);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001625}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001626/** Create a backend YOLO layer function
1627 *
1628 * @tparam YOLOlayerFunction Backend YOLO function
1629 * @tparam TargetInfo Target-specific information
1630 *
1631 * @param[in] node Node to create the backend function for
1632 * @param[in] ctx Graph context
1633 *
1634 * @return Backend YOLO layer function
1635 */
1636template <typename YOLOlayerFunction, typename TargetInfo>
1637std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1638{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001639 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001640 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1641
1642 // Extract IO and info
1643 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1644 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1645 const ActivationLayerInfo act_info = node.activation_info();
1646 const int32_t num_classes = node.num_classes();
1647 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1648 ARM_COMPUTE_ERROR_ON(input == nullptr);
1649 ARM_COMPUTE_ERROR_ON(output == nullptr);
1650
1651 // Create and configure function
1652 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1653 func->configure(input, output, act_info, num_classes);
1654
1655 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001656 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1657 << node.name()
1658 << " Type: " << node.type()
1659 << " Target: " << TargetInfo::TargetType
Michalis Spyrou96f67692018-09-13 11:39:28 +01001660 << " Data Type: " << input->info()->data_type()
1661 << " Input shape: " << input->info()->tensor_shape()
1662 << " Output shape: " << output->info()->tensor_shape()
1663 << " Activation function: " << act_info.activation()
1664 << " Num classes: " << num_classes
1665 << std::endl);
1666
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001667 return RETURN_UNIQUE_PTR(func);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001668}
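
// Typical call site (sketch): a backend function factory switches on the node
// type and instantiates these helpers with its own function and TargetInfo
// types. For the CL backend this looks roughly like the following (type names
// as used by the factories at the time of writing, shown for illustration):
//
//   case NodeType::PoolingLayer:
//       return detail::create_pooling_layer<CLPoolingLayer, CLTargetInfo>(
//           *polymorphic_downcast<PoolingLayerNode *>(node));
//   case NodeType::SoftmaxLayer:
//       return detail::create_softmax_layer<CLSoftmaxLayer, CLTargetInfo>(
//           *polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
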
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001669} // namespace detail
1670} // namespace backends
1671} // namespace graph
1672} // namespace arm_compute
1673
Michalis Spyrouf4643372019-11-29 16:17:13 +00001674#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */