blob: 9f352edb9a293bd930b21a7c002ccb4738bdad3c [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00002 * Copyright (c) 2018-2020 ARM Limited.
Georgios Pinitasda2491f2018-06-01 17:49:09 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
25#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
Georgios Pinitasda2491f2018-06-01 17:49:09 +010026
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
Georgios Pinitas9e4824c2019-04-12 13:15:58 +010031#include "arm_compute/graph/Utils.h"
giuros01acce5042019-02-21 17:32:34 +000032#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
Manuel Bottinibffb41e2019-06-20 16:00:27 +010033#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010034#include "arm_compute/graph/backends/Utils.h"
35#include "arm_compute/graph/nodes/Nodes.h"
36
37#include "arm_compute/core/Error.h"
38#include "arm_compute/core/Helpers.h"
39#include "arm_compute/core/ITensorInfo.h"
40#include "arm_compute/core/utils/misc/Cast.h"
41
42namespace arm_compute
43{
44namespace graph
45{
46namespace backends
47{
48namespace detail
49{
50/** Returns backing tensor of a given tensor
51 *
52 * @tparam TargetInfo Target information
53 *
54 * @param[in] tensor Tensor to extract the backing tensor from
55 *
56 * @return Backing tensor if present else nullptr
57 */
58template <typename TargetInfo>
59typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
60{
61 typename TargetInfo::TensorType *backing_tensor = nullptr;
62 if(tensor != nullptr)
63 {
64 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
65 // Get backing tensor handle
66 ITensorHandle *tensor_handle = tensor->handle();
67 // Get backing tensor
68 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
69 }
70
71 return backing_tensor;
72}
73
74template <typename TargetInfo>
75void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
76{
77 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
Pablo Tello32521432018-11-15 14:43:10 +000078 << " Target: " << TargetInfo::TargetType
79 << " ID: " << node.id()
80 << node.name()
Georgios Pinitasda2491f2018-06-01 17:49:09 +010081 << std::endl);
82
83 ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
84 ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
85 ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
Michalis Spyrou6bff1952019-10-02 17:22:11 +010086 ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
Georgios Pinitasda2491f2018-06-01 17:49:09 +010087}
88
89/** Creates a backend activation layer function
90 *
91 * @tparam ActivationLayerFunction Backend activation function
92 * @tparam TargetInfo Target-specific information
93 *
94 * @param[in] node Node to create the backend function for
95 *
96 * @return Backend activation layer function
97 */
98template <typename ActivationLayerFunction, typename TargetInfo>
99std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
100{
101 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
102
103 // Extract IO and info
104 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
105 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
106 const ActivationLayerInfo act_info = node.activation_info();
107
108 // Create function
109 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
110 func->configure(input, output, act_info);
111
Pablo Tello32521432018-11-15 14:43:10 +0000112 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
113 << node.name()
114 << " Type: " << node.type()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000115 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100116 << " Data Type: " << input->info()->data_type()
117 << " Shape: " << input->info()->tensor_shape()
118 << " Activation function: " << act_info.activation()
119 << " a: " << act_info.a()
120 << " b: " << act_info.b()
121 << " InPlace : " << is_in_place_operation(input, output)
122 << std::endl);
123
124 return std::move(func);
125}
126
127/** Create a backend batch normalization layer function
128 *
129 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
130 * @tparam TargetInfo Target-specific information
131 *
132 * @param[in] node Node to create the backend function for
133 *
134 * @return Backend batch normalization layer function
135 */
136template <typename BatchNormalizationLayerFunction, typename TargetInfo>
137std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
138{
139 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
140
141 // Extract IO and info
giuros01acce5042019-02-21 17:32:34 +0000142 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
143 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
144 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
145 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
146 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
147
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100148 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
149 const float epsilon = node.epsilon();
150 const ActivationLayerInfo fused_act = node.fused_activation();
151
152 // Create and configure function
153 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
154 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
155
156 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000157 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
158 << node.name()
159 << " Type: " << node.type()
160 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100161 << " Data Type: " << input->info()->data_type()
162 << " Shape: " << input->info()->tensor_shape()
163 << " Epsilon: " << epsilon << " "
164 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
Pablo Tello32521432018-11-15 14:43:10 +0000165 << " InPlace: " << is_in_place_operation(input, output)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100166 << std::endl);
167
168 return std::move(func);
169}
170
giuros01acce5042019-02-21 17:32:34 +0000171/** Create a backend batch normalization layer function
172 *
173 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
174 * @tparam TargetInfo Target-specific information
175 *
176 * @param[in] node Node to create the backend function for
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000177 * @param[in] ctx Graph context
giuros01acce5042019-02-21 17:32:34 +0000178 *
179 * @return Backend batch normalization layer function
180 */
181template <typename FusedLayerTypes, typename TargetInfo>
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000182std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
giuros01acce5042019-02-21 17:32:34 +0000183{
184 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
185
186 // Extract IO and info
187 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
188 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
189 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
190 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
191 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
192 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
193 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
194
195 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
196
197 const PadStrideInfo conv_info = node.convolution_info();
198 const unsigned int num_groups = node.num_groups();
199 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
200 const ActivationLayerInfo fused_act = node.fused_activation();
201 const float epsilon = node.epsilon();
202
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000203 // Create and configure function (we assume that functions have been validated before creation)
204 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
205 std::unique_ptr<IFunction> func;
206 std::string func_name;
207
208 using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
209
giuros01acce5042019-02-21 17:32:34 +0000210 // Create and configure function
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000211 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
212 std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
giuros01acce5042019-02-21 17:32:34 +0000213
214 // Log info
215 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
216 << node.name()
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100217 << " Type: " << node.type()
218 << " Target: " << TargetInfo::TargetType
219 << " Data Type: " << input->info()->data_type()
220 << " Input shape: " << input->info()->tensor_shape()
221 << " Weights shape: " << weights->info()->tensor_shape()
222 << " Output shape: " << output->info()->tensor_shape()
223 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
224 << std::endl);
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000225 return func;
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100226}
227
228/** Create a backend fused depthwise convolution batch normalization layer function
229 *
230 * @tparam FusedLayerTypes Fused layer types
231 * @tparam TargetInfo Target-specific information
232 *
233 * @param[in] node Node to create the backend function for
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000234 * @param[in] ctx Graph context
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100235 *
236 * @return Backend fused depthwise convolution batch normalization layer function
237 */
238template <typename FusedLayerTypes, typename TargetInfo>
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000239std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100240{
241 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
242
243 // Extract IO and info
244 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
245 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
246 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
247 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
248 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
249 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
250 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
251
252 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
253
254 const PadStrideInfo conv_info = node.convolution_info();
255 const unsigned int depth_multiplier = node.depth_multiplier();
256 const ActivationLayerInfo fused_act = node.fused_activation();
257 const float epsilon = node.epsilon();
258
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000259 // Create and configure function (we assume that functions have been validated before creation)
260 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
261 std::unique_ptr<IFunction> func;
262 std::string func_name;
263
264 using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
265
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100266 // Create and configure function
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000267 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
268 std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100269
270 // Log info
271 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
272 << node.name()
273 << " Type: " << node.type()
giuros01acce5042019-02-21 17:32:34 +0000274 << " Target: " << TargetInfo::TargetType
275 << " Data Type: " << input->info()->data_type()
276 << " Input shape: " << input->info()->tensor_shape()
277 << " Weights shape: " << weights->info()->tensor_shape()
278 << " Output shape: " << output->info()->tensor_shape()
279 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
280 << std::endl);
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000281 return func;
giuros01acce5042019-02-21 17:32:34 +0000282}
283
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100284/** Create a backend bounding box transform layer function
285 *
286 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
287 * @tparam TargetInfo Target-specific information
288 *
289 * @param[in] node Node to create the backend function for
290 *
291 * @return Backend bounding box transform layer function
292 */
293template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
294std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
295{
296 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
297
298 // Extract IO and info
299 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
300 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
301 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
302 const BoundingBoxTransformInfo bbox_info = node.info();
303
304 // Create and configure function
305 auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
306 func->configure(input, output, deltas, bbox_info);
307
308 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000309 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
310 << node.name()
311 << " Type: " << node.type()
312 << " Target: " << TargetInfo::TargetType
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100313 << " Data Type: " << input->info()->data_type()
314 << " Shape: " << input->info()->tensor_shape()
315 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
316 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
317 << std::endl);
318
319 return std::move(func);
320}
321
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100322/** Create a backend channel shuffle layer function
323 *
324 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
325 * @tparam TargetInfo Target-specific information
326 *
327 * @param[in] node Node to create the backend function for
328 *
329 * @return Backend channel shuffle layer function
330 */
331template <typename ChannelShuffleLayerFunction, typename TargetInfo>
332std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
333{
334 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
335
336 // Extract IO and info
337 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
338 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
339 const unsigned int num_groups = node.num_groups();
340
341 // Create function
342 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
343 func->configure(input, output, num_groups);
344
Pablo Tello32521432018-11-15 14:43:10 +0000345 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
346 << node.name()
347 << " Type: " << node.type()
348 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100349 << " Data Type: " << input->info()->data_type()
350 << " Shape: " << input->info()->tensor_shape()
351 << " Num groups: " << num_groups
352 << std::endl);
353
354 return std::move(func);
355}
356
Georgios Pinitase2220552018-07-20 13:23:44 +0100357/** Create a backend layer concatenate function
358 *
359 * @tparam ConcatenateLayerFunction Backend concatenate function
360 * @tparam TargetInfo Target-specific information
361 *
362 * @param[in] node Node to create the backend function for
363 *
364 * @return Backend concatenate layer function
365 */
366template <typename ConcatenateLayerFunction, typename TargetInfo>
367std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
368{
369 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
370 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
371
372 // Return nullptr if depth concatenate is switched off
373 if(!node.is_enabled())
374 {
375 return nullptr;
376 }
377
378 // Extract IO and info
379 std::vector<typename TargetInfo::TensorType *> inputs;
380 for(unsigned int i = 0; i < node.num_inputs(); ++i)
381 {
382 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
383 }
384 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas9e4824c2019-04-12 13:15:58 +0100385 const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
386 const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
Georgios Pinitase2220552018-07-20 13:23:44 +0100387
388 // Create and configure function
389 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
390 func->configure(inputs, output, concat_axis);
391
392 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000393 const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
394 std::ostringstream qss;
395 if(is_quantized)
396 {
397 qss << " Output QuantInfo: " << output->info()->quantization_info();
398 }
Pablo Tello32521432018-11-15 14:43:10 +0000399 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
400 << node.name()
401 << " Type: " << node.type()
402 << " Target: " << TargetInfo::TargetType
Georgios Pinitase2220552018-07-20 13:23:44 +0100403 << " Data Type: " << output->info()->data_type()
404 << " Shape: " << output->info()->tensor_shape()
405 << " Num Inputs: " << inputs.size()
406 << " Axis: " << concat_axis
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000407 << qss.str()
Georgios Pinitase2220552018-07-20 13:23:44 +0100408 << std::endl);
409
410 return std::move(func);
411}
412
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100413/** Create a backend convolution layer function
414 *
415 * @tparam ConvolutionLayerFunctions Backend convolution functions
416 * @tparam TargetInfo Target-specific information
417 *
418 * @param[in] node Node to create the backend function for
419 * @param[in] ctx Graph context
420 *
421 * @return Backend convolution layer function
422 */
423template <typename ConvolutionLayerFunctions, typename TargetInfo>
424std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
425{
426 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
427
428 // Extract IO and info
429 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
430 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
431 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
432 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
433
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100434 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
435
436 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100437 {
438 biases->info()->set_data_type(DataType::S32);
439 }
440
Georgios Pinitas08346e92018-10-16 19:10:46 +0100441 const PadStrideInfo conv_info = node.convolution_info();
442 const unsigned int num_groups = node.num_groups();
443 const ConvolutionMethod conv_algorithm = node.convolution_method();
444 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
445 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100446
447 // Create and configure function (we assume that functions have been validated before creation)
448 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
449 std::unique_ptr<IFunction> func;
450 std::string func_name;
451
Georgios Pinitase2220552018-07-20 13:23:44 +0100452 if(conv_algorithm == ConvolutionMethod::Winograd)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100453 {
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100454 ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100455 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
456 std::string("WinogradConvolutionLayer"), mm,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100457 input, weights, biases, output, conv_info, fused_act, fast_math);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100458 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100459 else if(conv_algorithm == ConvolutionMethod::Direct)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100460 {
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100461 ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100462 std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
463 std::string("DirectConvolutionLayer"),
Georgios Pinitas08346e92018-10-16 19:10:46 +0100464 input, weights, biases, output, conv_info, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100465 }
466 else if(conv_algorithm == ConvolutionMethod::GEMM)
467 {
468 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
469 std::string("GEMMConvolutionLayer"), mm,
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100470 input, weights, biases, output, conv_info,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100471 WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100472 }
473 else
474 {
475 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
476 std::string("GenericConvolutionLayer"), mm,
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100477 input, weights, biases, output, conv_info,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100478 WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100479 }
480
481 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100482 std::ostringstream qss;
483 if(is_quantized)
484 {
485 qss << " Input QuantInfo: " << input->info()->quantization_info()
486 << " Weights QuantInfo: " << weights->info()->quantization_info()
487 << " Output QuantInfo: " << output->info()->quantization_info();
488 }
Pablo Tello32521432018-11-15 14:43:10 +0000489 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
490 << node.name()
491 << " Type: " << func_name
492 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100493 << " Data Type: " << input->info()->data_type()
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100494 << " Groups: " << num_groups
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100495 << " Input shape: " << input->info()->tensor_shape()
496 << " Weights shape: " << weights->info()->tensor_shape()
497 << " Output shape: " << output->info()->tensor_shape()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000498 << qss.str()
Georgios Pinitas08346e92018-10-16 19:10:46 +0100499 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100500 << std::endl);
501 return func;
502}
503
504/** Create a backend deconvolution layer function
505 *
506 * @tparam DeconvolutionLayerFunction Backend deconvolution function
507 * @tparam TargetInfo Target-specific information
508 *
509 * @param[in] node Node to create the backend function for
510 * @param[in] ctx Graph context
511 *
512 * @return Backend deconvolution layer function
513 */
514template <typename DeconvolutionLayerFunction, typename TargetInfo>
515std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
516{
517 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
518
519 // Extract IO and info
520 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
521 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
522 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
523 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
524
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100525 const PadStrideInfo deconv_info = node.deconvolution_info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100526
527 // Create and configure function (we assume that functions have been validated before creation)
528 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
529 std::unique_ptr<IFunction> func;
530
531 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
532 std::string(), mm,
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100533 input, weights, biases, output, deconv_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100534
535 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000536 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
537 << node.name()
538 << " Type: " << node.type()
539 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100540 << " Data Type: " << input->info()->data_type()
541 << " Input shape: " << input->info()->tensor_shape()
542 << " Weights shape: " << weights->info()->tensor_shape()
543 << " Output shape: " << output->info()->tensor_shape()
544 << std::endl);
545 return func;
546}
547
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100548/** Create a backend layer depth-wise convolution function
549 *
550 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
551 * @tparam TargetInfo Target-specific information
552 *
553 * @param[in] node Node to create the backend function for
554 *
555 * @return Backend depth-wise convolution layer function
556 */
Manuel Bottini05069f02019-09-26 17:18:26 +0100557template <typename DepthwiseConvolutionLayer, typename TargetInfo>
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100558std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
559{
560 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
561
562 // Extract IO and info
563 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
564 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
565 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
566 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
567
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100568 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
569
570 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100571 {
572 biases->info()->set_data_type(DataType::S32);
573 }
574
Manuel Bottini05069f02019-09-26 17:18:26 +0100575 const PadStrideInfo conv_info = node.convolution_info();
576 const unsigned int depth_multiplier = node.depth_multiplier();
577 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100578
579 // Create and configure function (we assume that functions have been validated before creation)
580 std::unique_ptr<IFunction> func;
581 std::string func_name;
Manuel Bottini05069f02019-09-26 17:18:26 +0100582
583 std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
584 std::string("DepthwiseConvolutionLayer"),
585 input, weights, biases, output, conv_info, depth_multiplier, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100586
587 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100588 std::ostringstream qss;
589 if(is_quantized)
590 {
591 qss << " Input QuantInfo: " << input->info()->quantization_info()
592 << " Weights QuantInfo: " << weights->info()->quantization_info()
593 << " Output QuantInfo: " << output->info()->quantization_info();
594 }
Pablo Tello32521432018-11-15 14:43:10 +0000595 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
596 << node.name()
597 << " Type: " << func_name
598 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100599 << " Data Type: " << input->info()->data_type()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100600 << " Input shape: " << input->info()->tensor_shape()
601 << " Weights shape: " << weights->info()->tensor_shape()
602 << " Output shape: " << output->info()->tensor_shape()
Georgios Pinitas05045c12018-12-07 18:31:47 +0000603 << " Depth multiplier: " << depth_multiplier
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000604 << qss.str()
Georgios Pinitas60e98252018-10-22 16:17:20 +0100605 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100606 << std::endl);
607 return func;
608}
609
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000610/** Create a backend dequantize layer function
611 *
612 * @tparam DequantizationLayer Function Backend dequantize function
613 * @tparam TargetInfo Target-specific information
614 *
615 * @param[in] node Node to create the backend function for
616 *
617 * @return Backend dequantize layer function
618 */
619template <typename DequantizationLayerFunction, typename TargetInfo>
620std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
621{
622 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
623
624 // Extract IO and info
625 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
626 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
627
628 ARM_COMPUTE_ERROR_ON(input == nullptr);
629 ARM_COMPUTE_ERROR_ON(output == nullptr);
630
631 // Create and configure function
632 auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
633 func->configure(input, output);
634
635 // Log info
636 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
637 << node.name()
638 << " Type: " << node.type()
639 << " Target: " << TargetInfo::TargetType
640 << " Data Type: " << input->info()->data_type()
641 << " Input shape: " << input->info()->tensor_shape()
642 << " Input quantization info: " << output->info()->quantization_info()
643 << " Output shape: " << output->info()->tensor_shape()
644 << std::endl);
645
646 return std::move(func);
647}
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000648/** Create a backend detection output layer function
649 *
650 * @tparam DetectionOutputLayer Function Backend detection output function
651 * @tparam TargetInfo Target-specific information
652 *
653 * @param[in] node Node to create the backend function for
654 *
655 * @return Backend detection output layer function
656 */
657template <typename DetectionOutputLayerFunction, typename TargetInfo>
658std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
659{
660 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
661
662 // Extract IO and info
663 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
664 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
665 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
666 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
667 const DetectionOutputLayerInfo detect_info = node.detection_output_info();
668
669 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
670 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
671 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
672 ARM_COMPUTE_ERROR_ON(output == nullptr);
673
674 // Create and configure function
675 auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
676 func->configure(input0, input1, input2, output, detect_info);
677
678 // Log info
679 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
680 << node.name()
681 << " Type: " << node.type()
682 << " Target: " << TargetInfo::TargetType
683 << " Data Type: " << input0->info()->data_type()
684 << " Input0 shape: " << input0->info()->tensor_shape()
685 << " Input1 shape: " << input1->info()->tensor_shape()
686 << " Input2 shape: " << input2->info()->tensor_shape()
687 << " Output shape: " << output->info()->tensor_shape()
688 << " DetectionOutputLayer info: " << detect_info
689 << std::endl);
690
691 return std::move(func);
692}
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000693
694/** Create a backend detection post process layer function
695 *
696 * @tparam DetectionPostProcessLayerFunction Backend detection output function
697 * @tparam TargetInfo Target-specific information
698 *
699 * @param[in] node Node to create the backend function for
700 *
701 * @return Backend detection post process layer function
702 */
703template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
704std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
705{
706 validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
707
708 // Extract IO and info
709 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
710 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
711 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
712 typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
713 typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
714 typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
715 typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
716 const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
717
718 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
719 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
720 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
721 ARM_COMPUTE_ERROR_ON(output0 == nullptr);
722 ARM_COMPUTE_ERROR_ON(output1 == nullptr);
723 ARM_COMPUTE_ERROR_ON(output2 == nullptr);
724 ARM_COMPUTE_ERROR_ON(output3 == nullptr);
725
726 // Create and configure function
727 auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
728 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
729
730 // Log info
731 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
732 << node.name()
733 << " Type: " << node.type()
734 << " Target: " << TargetInfo::TargetType
735 << " Data Type: " << input0->info()->data_type()
736 << " Input0 shape: " << input0->info()->tensor_shape()
737 << " Input1 shape: " << input1->info()->tensor_shape()
738 << " Input2 shape: " << input2->info()->tensor_shape()
739 << " Output0 shape: " << output0->info()->tensor_shape()
740 << " Output1 shape: " << output1->info()->tensor_shape()
741 << " Output2 shape: " << output2->info()->tensor_shape()
742 << " Output3 shape: " << output3->info()->tensor_shape()
743 << " DetectionPostProcessLayer info: " << detect_info
744 << std::endl);
745
746 return std::move(func);
747}
748
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100749/** Create a backend element-wise operation layer function
750 *
751 * @tparam EltwiseFunctions Backend element-wise function
752 * @tparam TargetInfo Target-specific information
753 *
754 * @param[in] node Node to create the backend function for
755 *
756 * @return Backend element-wise operation layer function
757 */
758template <typename EltwiseFunctions, typename TargetInfo>
759std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
760{
761 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
762
763 // Extract IO and info
764 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
765 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
766 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
767 const EltwiseOperation eltwise_op = node.eltwise_operation();
768 const ConvertPolicy convert_policy = node.convert_policy();
769 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
770 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
771 ARM_COMPUTE_ERROR_ON(output == nullptr);
772
773 std::unique_ptr<IFunction> func = nullptr;
774 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100775 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100776 {
777 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
778 std::string("ArithmeticAddition"),
779 input1, input2, output, convert_policy);
780 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100781 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100782 {
783 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
784 std::string("ArithmeticSubtraction"),
785 input1, input2, output, convert_policy);
786 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100787 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100788 {
789 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
790 std::string("PixelWiseMultiplication"),
791 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
792 }
793 else
794 {
795 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
796 }
797
798 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000799 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
800 << node.name()
801 << " Type: " << node.type()
802 << " Target: " << TargetInfo::TargetType
803 << " Operation: " << func_name
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100804 << " Data Type: " << input1->info()->data_type()
Pablo Tello32521432018-11-15 14:43:10 +0000805 << " Shape: " << input1->info()->tensor_shape()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100806 << std::endl);
807
808 return func;
809}
810
811/** Create a backend flatten layer function
812 *
813 * @tparam FlattenLayerFunction Backend flatten function
814 * @tparam TargetInfo Target-specific information
815 *
816 * @param[in] node Node to create the backend function for
817 *
818 * @return Backend flatten layer function
819 */
820template <typename FlattenLayerFunction, typename TargetInfo>
821std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
822{
823 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
824
825 // Extract IO and info
826 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
827 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
828
Georgios Pinitase2220552018-07-20 13:23:44 +0100829 ARM_COMPUTE_ERROR_ON(input == nullptr);
830 ARM_COMPUTE_ERROR_ON(output == nullptr);
831
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100832 // Create and configure function
833 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
834 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100835
836 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000837 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
838 << node.name()
839 << " Type: " << node.type()
840 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100841 << " Data Type: " << input->info()->data_type()
842 << " Input shape: " << input->info()->tensor_shape()
843 << " Output shape: " << output->info()->tensor_shape()
844 << std::endl);
845
846 return std::move(func);
847}
848
849/** Create a backend fully connected layer function
850 *
851 * @tparam FullyConnectedLayerFunction Backend fully-connected function
852 * @tparam TargetInfo Target-specific information
853 *
854 * @param[in] node Node to create the backend function for
855 * @param[in] ctx Graph context
856 *
857 * @return Backend fully connected layer function
858 */
859template <typename FullyConnectedLayerFunction, typename TargetInfo>
860std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
861{
862 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
863
864 // Extract IO and info
865 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
866 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
867 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
868 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas7d66a8e2018-07-17 12:28:42 +0100869 const FullyConnectedLayerInfo fc_info = node.info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100870
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100871 ARM_COMPUTE_ERROR_ON(input == nullptr);
872 ARM_COMPUTE_ERROR_ON(weights == nullptr);
873 ARM_COMPUTE_ERROR_ON(output == nullptr);
874
Georgios Pinitase2220552018-07-20 13:23:44 +0100875 // Create and configure function
Michalis Spyrou1a569a32019-09-10 17:20:34 +0100876 auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
877 auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
878 auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
Georgios Pinitase2220552018-07-20 13:23:44 +0100879 func->configure(input, weights, biases, output, fc_info);
880
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100881 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
882
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100883 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100884 std::ostringstream qss;
885 if(is_quantized)
886 {
887 qss << " Input QuantInfo: " << input->info()->quantization_info()
888 << " Weights QuantInfo: " << weights->info()->quantization_info()
889 << " Output QuantInfo: " << output->info()->quantization_info();
890 }
Pablo Tello32521432018-11-15 14:43:10 +0000891 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
892 << node.name()
893 << " Type: " << node.type()
894 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100895 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100896 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100897 << " Input shape: " << input->info()->tensor_shape()
898 << " Weights shape: " << weights->info()->tensor_shape()
899 << " Output shape: " << output->info()->tensor_shape()
900 << std::endl);
901
902 return std::move(func);
903}
904
Manuel Bottini5209be52019-02-13 16:34:56 +0000905/** Create a backend generate proposals layer function
906 *
907 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
908 * @tparam TargetInfo Target-specific information
909 *
910 * @param[in] node Node to create the backend function for
911 * @param[in] ctx Graph context
912 *
913 * @return Backend generate proposals layer function
914 */
915template <typename GenerateProposalsLayerFunction, typename TargetInfo>
916std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
917{
918 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
919
920 // Extract IO and info
921 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
922 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
923 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
924 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
925 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
926 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
927 const GenerateProposalsInfo info = node.info();
928
929 ARM_COMPUTE_ERROR_ON(scores == nullptr);
930 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
931 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
932 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
933 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
934
935 // Create and configure function
936 auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
937 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
938
939 // Log info
940 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
941 << " Target " << TargetInfo::TargetType
942 << " Data Type: " << scores->info()->data_type()
943 << " Scores shape: " << scores->info()->tensor_shape()
944 << " Deltas shape: " << deltas->info()->tensor_shape()
945 << " Anchors shape: " << anchors->info()->tensor_shape()
946 << " Proposals shape: " << proposals->info()->tensor_shape()
947 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
948 << " Scores Out shape: " << scores_out->info()->tensor_shape()
949 << std::endl);
950
951 return std::move(func);
952}
953
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100954/** Create a backend normalization layer function
955 *
956 * @tparam NormalizationLayerFunction Backend normalization function
957 * @tparam TargetInfo Target-specific information
958 *
959 * @param[in] node Node to create the backend function for
960 * @param[in] ctx Graph context
961 *
962 * @return Backend normalization layer function
963 */
964template <typename NormalizationLayerFunction, typename TargetInfo>
965std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
966{
967 ARM_COMPUTE_UNUSED(ctx);
968
969 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
970
971 // Extract IO and info
972 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
973 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
974 const NormalizationLayerInfo norm_info = node.normalization_info();
975 ARM_COMPUTE_ERROR_ON(input == nullptr);
976 ARM_COMPUTE_ERROR_ON(output == nullptr);
977
978 // Create and configure function
979 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
980 func->configure(input, output, norm_info);
981
982 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000983 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
984 << node.name()
985 << " Type: " << node.type()
986 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100987 << " Data Type: " << input->info()->data_type()
988 << " Input shape: " << input->info()->tensor_shape()
989 << " Output shape: " << output->info()->tensor_shape()
990 << " Normalization info: " << norm_info.type()
991 << std::endl);
992
993 return std::move(func);
994}
995
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100996/** Create a backend normalize planar YUV layer function
997 *
998 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
999 * @tparam TargetInfo Target-specific information
1000 *
1001 * @param[in] node Node to create the backend function for
1002 *
1003 * @return Backend normalize plnar YUV layer function
1004 */
1005template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
1006std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
1007{
1008 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1009
1010 // Extract IO and info
1011 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1012 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1013 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1014 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1015 ARM_COMPUTE_ERROR_ON(input == nullptr);
1016 ARM_COMPUTE_ERROR_ON(mean == nullptr);
1017 ARM_COMPUTE_ERROR_ON(std == nullptr);
1018 ARM_COMPUTE_ERROR_ON(output == nullptr);
1019
1020 // Create and configure function
1021 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
1022 func->configure(input, output, mean, std);
1023
1024 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001025 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1026 << node.name()
1027 << " Type: " << node.type()
1028 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001029 << " Data Type: " << input->info()->data_type()
1030 << " Shape: " << input->info()->tensor_shape()
1031 << std::endl);
1032
1033 return std::move(func);
1034}
1035
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001036/** Create a backend pad layer function
1037 *
1038 * @tparam PadLayerFunction Backend pad function
1039 * @tparam TargetInfo Target-specific information
1040 *
1041 * @param[in] node Node to create the backend function for
1042 *
1043 * @return Backend pad layer function
1044 */
1045template <typename PadLayerFunction, typename TargetInfo>
1046std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1047{
1048 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1049
1050 // Extract IO and info
1051 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1052 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1053 const PaddingList &padding = node.padding();
1054 ARM_COMPUTE_ERROR_ON(input == nullptr);
1055 ARM_COMPUTE_ERROR_ON(output == nullptr);
1056
1057 // Create and configure function
1058 auto func = support::cpp14::make_unique<PadLayerFunction>();
1059 func->configure(input, output, padding);
1060
1061 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001062 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1063 << node.name()
1064 << " Type: " << node.type()
1065 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001066 << " Data Type: " << input->info()->data_type()
1067 << " Input shape: " << input->info()->tensor_shape()
1068 << " Output shape: " << output->info()->tensor_shape()
1069 << std::endl);
1070
1071 return std::move(func);
1072}
1073
Georgios Pinitas57c48242018-08-02 13:41:49 +01001074/** Create a backend permute layer function
1075 *
1076 * @tparam PermuteLayerFunction Backend permute function
1077 * @tparam TargetInfo Target-specific information
1078 *
1079 * @param[in] node Node to create the backend function for
1080 *
1081 * @return Backend permute layer function
1082 */
1083template <typename PermuteLayerFunction, typename TargetInfo>
1084std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1085{
1086 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1087
1088 // Extract IO and info
1089 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1090 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1091 const PermutationVector &perm = node.permutation_vector();
1092 ARM_COMPUTE_ERROR_ON(input == nullptr);
1093 ARM_COMPUTE_ERROR_ON(output == nullptr);
1094
1095 // Create and configure function
1096 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1097 func->configure(input, output, perm);
1098
1099 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001100 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1101 << node.name()
1102 << " Type: " << node.type()
1103 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001104 << " Data Type: " << input->info()->data_type()
1105 << " Input shape: " << input->info()->tensor_shape()
1106 << " Output shape: " << output->info()->tensor_shape()
1107 << " Permutation vector: " << perm
1108 << std::endl);
1109
1110 return std::move(func);
1111}
1112
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001113/** Create a backend pooling layer function
1114 *
1115 * @tparam PoolingLayerFunction Backend pooling function
1116 * @tparam TargetInfo Target-specific information
1117 *
1118 * @param[in] node Node to create the backend function for
1119 *
1120 * @return Backend pooling layer function
1121 */
1122template <typename PoolingLayerFunction, typename TargetInfo>
1123std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1124{
1125 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1126
1127 // Extract IO and info
1128 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1129 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1130 const PoolingLayerInfo pool_info = node.pooling_info();
1131 ARM_COMPUTE_ERROR_ON(input == nullptr);
1132 ARM_COMPUTE_ERROR_ON(output == nullptr);
1133
1134 // Create and configure function
1135 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1136 func->configure(input, output, pool_info);
1137
1138 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001139 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1140 << node.name()
1141 << " Type: " << node.type()
1142 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001143 << " Data Type: " << input->info()->data_type()
1144 << " Input shape: " << input->info()->tensor_shape()
1145 << " Output shape: " << output->info()->tensor_shape()
Sang-Hoon Park0cb3da62020-01-15 12:39:56 +00001146 << " Pooling info: " << pool_info.pool_type
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001147 << std::endl);
1148
1149 return std::move(func);
1150}
1151
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001152/** Create a backend PRelu layer function
1153 *
1154 * @tparam PReluFunction Backend PRelu function
1155 * @tparam TargetInfo Target-specific information
1156 *
1157 * @param[in] node Node to create the backend function for
1158 *
1159 * @return Backend PRelu layer function
1160 */
1161template <typename PReluFunction, typename TargetInfo>
1162std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1163{
1164 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1165
1166 // Extract IO and info
1167 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1168 typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1169 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1170 ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1171 ARM_COMPUTE_ERROR_ON(output == nullptr);
1172
1173 // Create and configure function
1174 auto func = support::cpp14::make_unique<PReluFunction>();
1175 func->configure(input, alpha, output);
1176
1177 // Log info
1178 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1179 << node.name()
1180 << " Type: " << node.type()
1181 << " Target: " << TargetInfo::TargetType
1182 << " Data Type: " << input->info()->data_type()
1183 << " Input shape: " << input->info()->tensor_shape()
1184 << " Output shape: " << output->info()->tensor_shape()
1185 << std::endl);
1186
1187 return std::move(func);
1188}
1189
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00001190/** Create a backend print layer function
1191 *
1192 * @tparam TargetInfo Target-specific information
1193 *
1194 * @param[in] node Node to create the backend function for
1195 *
1196 * @return Backend print layer function
1197 */
1198template <typename TargetInfo>
1199std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1200{
1201 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1202
1203 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1204 ARM_COMPUTE_ERROR_ON(input == nullptr);
1205 ARM_COMPUTE_UNUSED(input);
1206
1207 // Log info
1208 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1209 << node.name()
1210 << " Type: " << node.type()
1211 << " Target: " << TargetInfo::TargetType
1212 << " Data Type: " << input->info()->data_type()
1213 << " Input shape: " << input->info()->tensor_shape()
1214 << std::endl);
1215
1216 return nullptr;
1217}
1218
Pablo Tello32521432018-11-15 14:43:10 +00001219/** Create a backend priorbox layer function
1220 *
1221 * @tparam PriorBoxLayerFunction Backend priorbox function
1222 * @tparam TargetInfo Target-specific information
1223 *
1224 * @param[in] node Node to create the backend function for
1225 *
1226 * @return Backend priorbox layer function
1227 */
1228template <typename PriorBoxLayerFunction, typename TargetInfo>
1229std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1230{
1231 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1232
1233 // Extract IO and info
1234 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1235 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1236 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1237 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1238 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1239 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1240 ARM_COMPUTE_ERROR_ON(output == nullptr);
1241
1242 // Create and configure function
1243 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1244 func->configure(input0, input1, output, prior_info);
1245
1246 // Log info
1247 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1248 << node.name()
1249 << " Type: " << node.type()
1250 << " Target: " << TargetInfo::TargetType
1251 << " Data Type: " << input0->info()->data_type()
1252 << " Input0 shape: " << input0->info()->tensor_shape()
1253 << " Input1 shape: " << input1->info()->tensor_shape()
1254 << " Output shape: " << output->info()->tensor_shape()
1255 << " PriorBoxLayer info: " << prior_info
1256 << std::endl);
1257
1258 return std::move(func);
1259}
1260
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001261/** Create a backend quantization layer function
1262 *
1263 * @tparam QuantizationLayerFunction Backend quantization function
1264 * @tparam TargetInfo Target-specific information
1265 *
1266 * @param[in] node Node to create the backend function for
1267 *
1268 * @return Backend quantization layer function
1269 */
1270template <typename QuantizationLayerFunction, typename TargetInfo>
1271std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1272{
1273 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1274
1275 // Extract IO and info
1276 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1277 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1278 ARM_COMPUTE_ERROR_ON(input == nullptr);
1279 ARM_COMPUTE_ERROR_ON(output == nullptr);
1280
1281 // Create and configure function
1282 auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1283 func->configure(input, output);
1284
1285 // Log info
1286 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1287 << node.name()
1288 << " Type: " << node.type()
1289 << " Target: " << TargetInfo::TargetType
1290 << " Data Type: " << input->info()->data_type()
1291 << " Input shape: " << input->info()->tensor_shape()
1292 << " Output shape: " << output->info()->tensor_shape()
1293 << std::endl);
1294
1295 return std::move(func);
1296}
1297
/** Create a backend reorg layer function
 *
 * @tparam ReorgLayerFunction Backend reorg function
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend reorg layer function
 */
template <typename ReorgLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function; the reorg stride comes from the node itself
    auto func = support::cpp14::make_unique<ReorgLayerFunction>();
    func->configure(input, output, node.stride());

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
1334
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001335/** Create a backend reshape layer function
1336 *
1337 * @tparam ReshapeLayerFunction Backend reshape function
1338 * @tparam TargetInfo Target-specific information
1339 *
1340 * @param[in] node Node to create the backend function for
1341 *
1342 * @return Backend reshape layer function
1343 */
1344template <typename ReshapeLayerFunction, typename TargetInfo>
1345std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1346{
1347 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1348
1349 // Extract IO and info
1350 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1351 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1352 ARM_COMPUTE_ERROR_ON(input == nullptr);
1353 ARM_COMPUTE_ERROR_ON(output == nullptr);
1354
1355 // Create and configure function
1356 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1357 func->configure(input, output);
1358
1359 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001360 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1361 << node.name()
1362 << " Type: " << node.type()
1363 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001364 << " Data Type: " << input->info()->data_type()
1365 << " Input shape: " << input->info()->tensor_shape()
1366 << " Output shape: " << output->info()->tensor_shape()
1367 << std::endl);
1368
1369 return std::move(func);
1370}
1371
1372/** Create a backend resize layer function
1373 *
1374 * @tparam ResizeLayerFunction Backend resize function
1375 * @tparam TargetInfo Target-specific information
1376 *
1377 * @param[in] node Node to create the backend function for
1378 *
1379 * @return Backend resize layer function
1380 */
1381template <typename ResizeLayerFunction, typename TargetInfo>
1382std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1383{
1384 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1385
1386 // Extract IO and info
1387 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1388 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1389 ARM_COMPUTE_ERROR_ON(input == nullptr);
1390 ARM_COMPUTE_ERROR_ON(output == nullptr);
1391 const InterpolationPolicy policy = node.policy();
1392
1393 // Create and configure function
1394 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1395 func->configure(input, output, policy, BorderMode::CONSTANT);
1396
1397 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001398 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1399 << node.name()
1400 << " Type: " << node.type()
1401 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001402 << " Data Type: " << input->info()->data_type()
1403 << " Input shape: " << input->info()->tensor_shape()
1404 << " Output shape: " << output->info()->tensor_shape()
1405 << " Interpolation: " << policy
1406 << std::endl);
1407
1408 return std::move(func);
1409}
1410
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001411/** Create a backend ROI align layer function
1412 *
1413 * @tparam ROIAlignLayerFunction ROI Align function
1414 * @tparam TargetInfo Target-specific information
1415 *
1416 * @param[in] node Node to create the backend function for
1417 *
1418 * @return ROI Align layer function
1419 */
1420template <typename ROIAlignLayerFunction, typename TargetInfo>
1421std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1422{
1423 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1424
1425 // Extract IO and info
1426 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1427 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1428 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1429 ARM_COMPUTE_ERROR_ON(input == nullptr);
1430 ARM_COMPUTE_ERROR_ON(output == nullptr);
1431 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1432
1433 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1434
1435 // Create and configure function
1436 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1437
1438 func->configure(input, rois, output, pool_info);
1439
1440 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001441 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1442 << node.name()
1443 << " Type: " << node.type()
1444 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001445 << " Data Type: " << input->info()->data_type()
1446 << " Input shape: " << input->info()->tensor_shape()
1447 << " Output shape: " << output->info()->tensor_shape()
1448 << " ROIs shape: " << rois->info()->tensor_shape()
1449 << " ROIPooling width: " << pool_info.pooled_width()
1450 << " ROIPooling height: " << pool_info.pooled_height()
1451 << std::endl);
1452
1453 return std::move(func);
1454}
1455
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001456/** Create a backend slice layer function
1457 *
1458 * @tparam SliceLayerFunction Backend slice function
1459 * @tparam TargetInfo Target-specific information
1460 *
1461 * @param[in] node Node to create the backend function for
1462 *
1463 * @return Backend slice layer function
1464 */
1465template <typename SliceLayerFunction, typename TargetInfo>
1466std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1467{
1468 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1469
1470 // Extract IO and info
1471 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1472 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1473 ARM_COMPUTE_ERROR_ON(input == nullptr);
1474 ARM_COMPUTE_ERROR_ON(output == nullptr);
1475
1476 // Create and configure function
1477 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1478 func->configure(input, output, node.starts(), node.ends());
1479
1480 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001481 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1482 << node.name()
1483 << " Type: " << node.type()
1484 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001485 << " Data Type: " << input->info()->data_type()
1486 << " Input shape: " << input->info()->tensor_shape()
1487 << " Output shape: " << output->info()->tensor_shape()
1488 << std::endl);
1489
1490 return std::move(func);
1491}
1492
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001493/** Create a backend softmax layer function
1494 *
1495 * @tparam SoftmaxLayerFunction Backend softmax function
1496 * @tparam TargetInfo Target-specific information
1497 *
1498 * @param[in] node Node to create the backend function for
1499 * @param[in] ctx Graph context
1500 *
1501 * @return Backend softmax layer function
1502 */
1503template <typename SoftmaxLayerFunction, typename TargetInfo>
1504std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1505{
1506 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1507
1508 // Extract IO and info
1509 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1510 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1511 const float beta = node.beta();
1512 ARM_COMPUTE_ERROR_ON(input == nullptr);
1513 ARM_COMPUTE_ERROR_ON(output == nullptr);
1514
1515 // Create and configure function
1516 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1517 func->configure(input, output, beta);
1518
1519 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001520 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1521 << node.name()
1522 << " Type: " << node.type()
1523 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001524 << " Data Type: " << input->info()->data_type()
1525 << " Input shape: " << input->info()->tensor_shape()
1526 << " Output shape: " << output->info()->tensor_shape()
1527 << std::endl);
1528
1529 return std::move(func);
1530}
Michele Di Giorgioec699752019-03-22 15:25:32 +00001531
1532/** Create a backend layer stack function
1533 *
1534 * @tparam StackLayerFunction Backend stack function
1535 * @tparam TargetInfo Target-specific information
1536 *
1537 * @param[in] node Node to create the backend function for
1538 *
1539 * @return Backend stack layer function
1540 */
1541template <typename StackLayerFunction, typename TargetInfo>
1542std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1543{
1544 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1545 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1546
1547 // Extract IO and info
1548 std::vector<typename TargetInfo::TensorType *> inputs;
1549 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1550 {
1551 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1552 }
1553 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1554 const int axis = node.axis();
1555
1556 // Create and configure function
1557 auto func = support::cpp14::make_unique<StackLayerFunction>();
1558 func->configure(inputs, axis, output);
1559
1560 // Log info
1561 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1562 << node.name()
1563 << " Type: " << node.type()
1564 << " Target: " << TargetInfo::TargetType
1565 << " Data Type: " << output->info()->data_type()
1566 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1567 << " Output shape: " << output->info()->tensor_shape()
1568 << " Num Inputs: " << inputs.size()
1569 << " Axis: " << axis
1570 << std::endl);
1571
1572 return std::move(func);
1573}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001574/** Create a backend Upsample layer function
1575 *
1576 * @tparam UpsampleLayerFunction Backend Upsample function
1577 * @tparam TargetInfo Target-specific information
1578 *
1579 * @param[in] node Node to create the backend function for
1580 * @param[in] ctx Graph context
1581 *
1582 * @return Backend Upsample layer function
1583 */
1584template <typename UpsampleLayerFunction, typename TargetInfo>
1585std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1586{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001587 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001588 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1589
1590 // Extract IO and info
1591 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1592 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1593 const Size2D info = node.info();
1594 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1595 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1596 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1597 ARM_COMPUTE_ERROR_ON(input == nullptr);
1598 ARM_COMPUTE_ERROR_ON(output == nullptr);
1599
1600 // Create and configure function
1601 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1602 func->configure(input, output, info, upsampling_policy);
1603
1604 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001605 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1606 << node.name()
1607 << " Type: " << node.type()
1608 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001609 << " Data Type: " << input->info()->data_type()
1610 << " Input shape: " << input->info()->tensor_shape()
1611 << " Output shape: " << output->info()->tensor_shape()
1612 << " Strides: " << info
1613 << " Upsampling policy: " << upsampling_policy
1614 << std::endl);
1615
1616 return std::move(func);
1617}
/** Create a backend YOLO layer function
 *
 * @tparam YOLOlayerFunction Backend YOLO function
 * @tparam TargetInfo        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend YOLO layer function
 */
template <typename YOLOlayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input       = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info    = node.activation_info();
    const int32_t                    num_classes = node.num_classes();
    // A YOLO layer without classes is ill-formed
    ARM_COMPUTE_ERROR_ON(num_classes <= 0);
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<YOLOlayerFunction>();
    func->configure(input, output, act_info, num_classes);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " Num classes: " << num_classes
                               << std::endl);

    return std::move(func);
}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001661} // namespace detail
1662} // namespace backends
1663} // namespace graph
1664} // namespace arm_compute
1665
Michalis Spyrouf4643372019-11-29 16:17:13 +00001666#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */