blob: 960011c1e217e3866282157a3a97b829a3540f57 [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
Giuseppe Rossinibb365de2019-02-15 10:24:47 +00002 * Copyright (c) 2018-2019 ARM Limited.
Georgios Pinitasda2491f2018-06-01 17:49:09 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
Georgios Pinitas9e4824c2019-04-12 13:15:58 +010031#include "arm_compute/graph/Utils.h"
giuros01acce5042019-02-21 17:32:34 +000032#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
Manuel Bottinibffb41e2019-06-20 16:00:27 +010033#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010034#include "arm_compute/graph/backends/Utils.h"
35#include "arm_compute/graph/nodes/Nodes.h"
36
37#include "arm_compute/core/Error.h"
38#include "arm_compute/core/Helpers.h"
39#include "arm_compute/core/ITensorInfo.h"
40#include "arm_compute/core/utils/misc/Cast.h"
41
42namespace arm_compute
43{
44namespace graph
45{
46namespace backends
47{
48namespace detail
49{
50/** Returns backing tensor of a given tensor
51 *
52 * @tparam TargetInfo Target information
53 *
54 * @param[in] tensor Tensor to extract the backing tensor from
55 *
56 * @return Backing tensor if present else nullptr
57 */
58template <typename TargetInfo>
59typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
60{
61 typename TargetInfo::TensorType *backing_tensor = nullptr;
62 if(tensor != nullptr)
63 {
64 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
65 // Get backing tensor handle
66 ITensorHandle *tensor_handle = tensor->handle();
67 // Get backing tensor
68 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
69 }
70
71 return backing_tensor;
72}
73
/** Validates a node before a backend function is created for it
 *
 * Checks that the node has been assigned to the expected target and that its
 * input/output edge counts match what the backend function expects.
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node                 Node to validate
 * @param[in] num_expected_inputs  Expected number of input edges
 * @param[in] num_expected_outputs Expected number of output edges
 */
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    // ARM_COMPUTE_UNUSED keeps the parameters referenced when the assert-style
    // checks above are compiled out, avoiding unused-parameter warnings
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}
88
89/** Creates a backend activation layer function
90 *
91 * @tparam ActivationLayerFunction Backend activation function
92 * @tparam TargetInfo Target-specific information
93 *
94 * @param[in] node Node to create the backend function for
95 *
96 * @return Backend activation layer function
97 */
98template <typename ActivationLayerFunction, typename TargetInfo>
99std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
100{
101 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
102
103 // Extract IO and info
104 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
105 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
106 const ActivationLayerInfo act_info = node.activation_info();
107
108 // Create function
109 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
110 func->configure(input, output, act_info);
111
Pablo Tello32521432018-11-15 14:43:10 +0000112 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
113 << node.name()
114 << " Type: " << node.type()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000115 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100116 << " Data Type: " << input->info()->data_type()
117 << " Shape: " << input->info()->tensor_shape()
118 << " Activation function: " << act_info.activation()
119 << " a: " << act_info.a()
120 << " b: " << act_info.b()
121 << " InPlace : " << is_in_place_operation(input, output)
122 << std::endl);
123
124 return std::move(func);
125}
126
127/** Create a backend batch normalization layer function
128 *
129 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
130 * @tparam TargetInfo Target-specific information
131 *
132 * @param[in] node Node to create the backend function for
133 *
134 * @return Backend batch normalization layer function
135 */
136template <typename BatchNormalizationLayerFunction, typename TargetInfo>
137std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
138{
139 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
140
141 // Extract IO and info
giuros01acce5042019-02-21 17:32:34 +0000142 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
143 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
144 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
145 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
146 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
147
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100148 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
149 const float epsilon = node.epsilon();
150 const ActivationLayerInfo fused_act = node.fused_activation();
151
152 // Create and configure function
153 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
154 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
155
156 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000157 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
158 << node.name()
159 << " Type: " << node.type()
160 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100161 << " Data Type: " << input->info()->data_type()
162 << " Shape: " << input->info()->tensor_shape()
163 << " Epsilon: " << epsilon << " "
164 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
Pablo Tello32521432018-11-15 14:43:10 +0000165 << " InPlace: " << is_in_place_operation(input, output)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100166 << std::endl);
167
168 return std::move(func);
169}
170
giuros01acce5042019-02-21 17:32:34 +0000171/** Create a backend batch normalization layer function
172 *
173 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
174 * @tparam TargetInfo Target-specific information
175 *
176 * @param[in] node Node to create the backend function for
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000177 * @param[in] ctx Graph context
giuros01acce5042019-02-21 17:32:34 +0000178 *
179 * @return Backend batch normalization layer function
180 */
181template <typename FusedLayerTypes, typename TargetInfo>
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000182std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
giuros01acce5042019-02-21 17:32:34 +0000183{
184 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
185
186 // Extract IO and info
187 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
188 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
189 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
190 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
191 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
192 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
193 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
194
195 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
196
197 const PadStrideInfo conv_info = node.convolution_info();
198 const unsigned int num_groups = node.num_groups();
199 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
200 const ActivationLayerInfo fused_act = node.fused_activation();
201 const float epsilon = node.epsilon();
202
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000203 // Create and configure function (we assume that functions have been validated before creation)
204 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
205 std::unique_ptr<IFunction> func;
206 std::string func_name;
207
208 using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
209
giuros01acce5042019-02-21 17:32:34 +0000210 // Create and configure function
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000211 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
212 std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
giuros01acce5042019-02-21 17:32:34 +0000213
214 // Log info
215 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
216 << node.name()
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100217 << " Type: " << node.type()
218 << " Target: " << TargetInfo::TargetType
219 << " Data Type: " << input->info()->data_type()
220 << " Input shape: " << input->info()->tensor_shape()
221 << " Weights shape: " << weights->info()->tensor_shape()
222 << " Output shape: " << output->info()->tensor_shape()
223 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
224 << std::endl);
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000225 return func;
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100226}
227
228/** Create a backend fused depthwise convolution batch normalization layer function
229 *
230 * @tparam FusedLayerTypes Fused layer types
231 * @tparam TargetInfo Target-specific information
232 *
233 * @param[in] node Node to create the backend function for
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000234 * @param[in] ctx Graph context
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100235 *
236 * @return Backend fused depthwise convolution batch normalization layer function
237 */
238template <typename FusedLayerTypes, typename TargetInfo>
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000239std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100240{
241 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
242
243 // Extract IO and info
244 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
245 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
246 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
247 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
248 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
249 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
250 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
251
252 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
253
254 const PadStrideInfo conv_info = node.convolution_info();
255 const unsigned int depth_multiplier = node.depth_multiplier();
256 const ActivationLayerInfo fused_act = node.fused_activation();
257 const float epsilon = node.epsilon();
258
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000259 // Create and configure function (we assume that functions have been validated before creation)
260 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
261 std::unique_ptr<IFunction> func;
262 std::string func_name;
263
264 using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;
265
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100266 // Create and configure function
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000267 std::tie(func, func_name) = create_named_memory_managed_function<FType>(
268 std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100269
270 // Log info
271 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
272 << node.name()
273 << " Type: " << node.type()
giuros01acce5042019-02-21 17:32:34 +0000274 << " Target: " << TargetInfo::TargetType
275 << " Data Type: " << input->info()->data_type()
276 << " Input shape: " << input->info()->tensor_shape()
277 << " Weights shape: " << weights->info()->tensor_shape()
278 << " Output shape: " << output->info()->tensor_shape()
279 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
280 << std::endl);
Gian Marco Iodice5dea19e2019-11-08 12:13:48 +0000281 return func;
giuros01acce5042019-02-21 17:32:34 +0000282}
283
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100284/** Create a backend bounding box transform layer function
285 *
286 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
287 * @tparam TargetInfo Target-specific information
288 *
289 * @param[in] node Node to create the backend function for
290 *
291 * @return Backend bounding box transform layer function
292 */
293template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
294std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
295{
296 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
297
298 // Extract IO and info
299 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
300 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
301 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
302 const BoundingBoxTransformInfo bbox_info = node.info();
303
304 // Create and configure function
305 auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
306 func->configure(input, output, deltas, bbox_info);
307
308 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000309 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
310 << node.name()
311 << " Type: " << node.type()
312 << " Target: " << TargetInfo::TargetType
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100313 << " Data Type: " << input->info()->data_type()
314 << " Shape: " << input->info()->tensor_shape()
315 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
316 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
317 << std::endl);
318
319 return std::move(func);
320}
321
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100322/** Create a backend channel shuffle layer function
323 *
324 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
325 * @tparam TargetInfo Target-specific information
326 *
327 * @param[in] node Node to create the backend function for
328 *
329 * @return Backend channel shuffle layer function
330 */
331template <typename ChannelShuffleLayerFunction, typename TargetInfo>
332std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
333{
334 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
335
336 // Extract IO and info
337 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
338 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
339 const unsigned int num_groups = node.num_groups();
340
341 // Create function
342 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
343 func->configure(input, output, num_groups);
344
Pablo Tello32521432018-11-15 14:43:10 +0000345 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
346 << node.name()
347 << " Type: " << node.type()
348 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100349 << " Data Type: " << input->info()->data_type()
350 << " Shape: " << input->info()->tensor_shape()
351 << " Num groups: " << num_groups
352 << std::endl);
353
354 return std::move(func);
355}
356
Georgios Pinitase2220552018-07-20 13:23:44 +0100357/** Create a backend layer concatenate function
358 *
359 * @tparam ConcatenateLayerFunction Backend concatenate function
360 * @tparam TargetInfo Target-specific information
361 *
362 * @param[in] node Node to create the backend function for
363 *
364 * @return Backend concatenate layer function
365 */
366template <typename ConcatenateLayerFunction, typename TargetInfo>
367std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
368{
369 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
370 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
371
372 // Return nullptr if depth concatenate is switched off
373 if(!node.is_enabled())
374 {
375 return nullptr;
376 }
377
378 // Extract IO and info
379 std::vector<typename TargetInfo::TensorType *> inputs;
380 for(unsigned int i = 0; i < node.num_inputs(); ++i)
381 {
382 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
383 }
384 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas9e4824c2019-04-12 13:15:58 +0100385 const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
386 const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
Georgios Pinitase2220552018-07-20 13:23:44 +0100387
388 // Create and configure function
389 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
390 func->configure(inputs, output, concat_axis);
391
392 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000393 const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
394 std::ostringstream qss;
395 if(is_quantized)
396 {
397 qss << " Output QuantInfo: " << output->info()->quantization_info();
398 }
Pablo Tello32521432018-11-15 14:43:10 +0000399 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
400 << node.name()
401 << " Type: " << node.type()
402 << " Target: " << TargetInfo::TargetType
Georgios Pinitase2220552018-07-20 13:23:44 +0100403 << " Data Type: " << output->info()->data_type()
404 << " Shape: " << output->info()->tensor_shape()
405 << " Num Inputs: " << inputs.size()
406 << " Axis: " << concat_axis
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000407 << qss.str()
Georgios Pinitase2220552018-07-20 13:23:44 +0100408 << std::endl);
409
410 return std::move(func);
411}
412
/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // For quantized inputs the bias accumulator must be 32-bit signed
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    // Dispatch on the convolution method selected for the node.
    // Winograd and Direct reject grouped convolution; GEMM/Generic support it.
    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        // Direct convolution is the only variant created without a memory manager
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        // Fallback: let the generic function pick an implementation internally
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info (quantization details only make sense for quantized data types)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
503
504/** Create a backend deconvolution layer function
505 *
506 * @tparam DeconvolutionLayerFunction Backend deconvolution function
507 * @tparam TargetInfo Target-specific information
508 *
509 * @param[in] node Node to create the backend function for
510 * @param[in] ctx Graph context
511 *
512 * @return Backend deconvolution layer function
513 */
514template <typename DeconvolutionLayerFunction, typename TargetInfo>
515std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
516{
517 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
518
519 // Extract IO and info
520 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
521 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
522 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
523 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
524
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100525 const PadStrideInfo deconv_info = node.deconvolution_info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100526
527 // Create and configure function (we assume that functions have been validated before creation)
528 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
529 std::unique_ptr<IFunction> func;
530
531 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
532 std::string(), mm,
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100533 input, weights, biases, output, deconv_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100534
535 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000536 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
537 << node.name()
538 << " Type: " << node.type()
539 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100540 << " Data Type: " << input->info()->data_type()
541 << " Input shape: " << input->info()->tensor_shape()
542 << " Weights shape: " << weights->info()->tensor_shape()
543 << " Output shape: " << output->info()->tensor_shape()
544 << std::endl);
545 return func;
546}
547
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100548/** Create a backend layer depth-wise convolution function
549 *
550 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
551 * @tparam TargetInfo Target-specific information
552 *
553 * @param[in] node Node to create the backend function for
554 *
555 * @return Backend depth-wise convolution layer function
556 */
Manuel Bottini05069f02019-09-26 17:18:26 +0100557template <typename DepthwiseConvolutionLayer, typename TargetInfo>
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100558std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
559{
560 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
561
562 // Extract IO and info
563 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
564 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
565 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
566 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
567
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100568 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
569
570 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100571 {
572 biases->info()->set_data_type(DataType::S32);
573 }
574
Manuel Bottini05069f02019-09-26 17:18:26 +0100575 const PadStrideInfo conv_info = node.convolution_info();
576 const unsigned int depth_multiplier = node.depth_multiplier();
577 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100578
579 // Create and configure function (we assume that functions have been validated before creation)
580 std::unique_ptr<IFunction> func;
581 std::string func_name;
Manuel Bottini05069f02019-09-26 17:18:26 +0100582
583 std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
584 std::string("DepthwiseConvolutionLayer"),
585 input, weights, biases, output, conv_info, depth_multiplier, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100586
587 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100588 std::ostringstream qss;
589 if(is_quantized)
590 {
591 qss << " Input QuantInfo: " << input->info()->quantization_info()
592 << " Weights QuantInfo: " << weights->info()->quantization_info()
593 << " Output QuantInfo: " << output->info()->quantization_info();
594 }
Pablo Tello32521432018-11-15 14:43:10 +0000595 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
596 << node.name()
597 << " Type: " << func_name
598 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100599 << " Data Type: " << input->info()->data_type()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100600 << " Input shape: " << input->info()->tensor_shape()
601 << " Weights shape: " << weights->info()->tensor_shape()
602 << " Output shape: " << output->info()->tensor_shape()
Georgios Pinitas05045c12018-12-07 18:31:47 +0000603 << " Depth multiplier: " << depth_multiplier
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000604 << qss.str()
Georgios Pinitas60e98252018-10-22 16:17:20 +0100605 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100606 << std::endl);
607 return func;
608}
609
Isabella Gottardicd4e9ab2019-11-05 17:50:27 +0000610/** Create a backend dequantize layer function
611 *
612 * @tparam DequantizationLayer Function Backend dequantize function
613 * @tparam TargetInfo Target-specific information
614 *
615 * @param[in] node Node to create the backend function for
616 *
617 * @return Backend dequantize layer function
618 */
619template <typename DequantizationLayerFunction, typename TargetInfo>
620std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
621{
622 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
623
624 // Extract IO and info
625 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
626 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
627
628 ARM_COMPUTE_ERROR_ON(input == nullptr);
629 ARM_COMPUTE_ERROR_ON(output == nullptr);
630
631 // Create and configure function
632 auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
633 func->configure(input, output);
634
635 // Log info
636 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
637 << node.name()
638 << " Type: " << node.type()
639 << " Target: " << TargetInfo::TargetType
640 << " Data Type: " << input->info()->data_type()
641 << " Input shape: " << input->info()->tensor_shape()
642 << " Input quantization info: " << output->info()->quantization_info()
643 << " Output shape: " << output->info()->tensor_shape()
644 << std::endl);
645
646 return std::move(func);
647}
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000648/** Create a backend detection output layer function
649 *
650 * @tparam DetectionOutputLayer Function Backend detection output function
651 * @tparam TargetInfo Target-specific information
652 *
653 * @param[in] node Node to create the backend function for
654 *
655 * @return Backend detection output layer function
656 */
657template <typename DetectionOutputLayerFunction, typename TargetInfo>
658std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
659{
660 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
661
662 // Extract IO and info
663 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
664 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
665 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
666 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
667 const DetectionOutputLayerInfo detect_info = node.detection_output_info();
668
669 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
670 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
671 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
672 ARM_COMPUTE_ERROR_ON(output == nullptr);
673
674 // Create and configure function
675 auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
676 func->configure(input0, input1, input2, output, detect_info);
677
678 // Log info
679 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
680 << node.name()
681 << " Type: " << node.type()
682 << " Target: " << TargetInfo::TargetType
683 << " Data Type: " << input0->info()->data_type()
684 << " Input0 shape: " << input0->info()->tensor_shape()
685 << " Input1 shape: " << input1->info()->tensor_shape()
686 << " Input2 shape: " << input2->info()->tensor_shape()
687 << " Output shape: " << output->info()->tensor_shape()
688 << " DetectionOutputLayer info: " << detect_info
689 << std::endl);
690
691 return std::move(func);
692}
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000693
694/** Create a backend detection post process layer function
695 *
696 * @tparam DetectionPostProcessLayerFunction Backend detection output function
697 * @tparam TargetInfo Target-specific information
698 *
699 * @param[in] node Node to create the backend function for
700 *
701 * @return Backend detection post process layer function
702 */
703template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
704std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
705{
706 validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
707
708 // Extract IO and info
709 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
710 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
711 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
712 typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
713 typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
714 typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
715 typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
716 const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
717
718 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
719 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
720 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
721 ARM_COMPUTE_ERROR_ON(output0 == nullptr);
722 ARM_COMPUTE_ERROR_ON(output1 == nullptr);
723 ARM_COMPUTE_ERROR_ON(output2 == nullptr);
724 ARM_COMPUTE_ERROR_ON(output3 == nullptr);
725
726 // Create and configure function
727 auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
728 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
729
730 // Log info
731 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
732 << node.name()
733 << " Type: " << node.type()
734 << " Target: " << TargetInfo::TargetType
735 << " Data Type: " << input0->info()->data_type()
736 << " Input0 shape: " << input0->info()->tensor_shape()
737 << " Input1 shape: " << input1->info()->tensor_shape()
738 << " Input2 shape: " << input2->info()->tensor_shape()
739 << " Output0 shape: " << output0->info()->tensor_shape()
740 << " Output1 shape: " << output1->info()->tensor_shape()
741 << " Output2 shape: " << output2->info()->tensor_shape()
742 << " Output3 shape: " << output3->info()->tensor_shape()
743 << " DetectionPostProcessLayer info: " << detect_info
744 << std::endl);
745
746 return std::move(func);
747}
748
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100749/** Create a backend element-wise operation layer function
750 *
751 * @tparam EltwiseFunctions Backend element-wise function
752 * @tparam TargetInfo Target-specific information
753 *
754 * @param[in] node Node to create the backend function for
755 *
756 * @return Backend element-wise operation layer function
757 */
758template <typename EltwiseFunctions, typename TargetInfo>
759std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
760{
761 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
762
763 // Extract IO and info
764 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
765 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
766 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
767 const EltwiseOperation eltwise_op = node.eltwise_operation();
768 const ConvertPolicy convert_policy = node.convert_policy();
769 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
770 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
771 ARM_COMPUTE_ERROR_ON(output == nullptr);
772
773 std::unique_ptr<IFunction> func = nullptr;
774 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100775 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100776 {
777 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
778 std::string("ArithmeticAddition"),
779 input1, input2, output, convert_policy);
780 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100781 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100782 {
783 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
784 std::string("ArithmeticSubtraction"),
785 input1, input2, output, convert_policy);
786 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100787 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100788 {
789 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
790 std::string("PixelWiseMultiplication"),
791 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
792 }
793 else
794 {
795 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
796 }
797
798 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000799 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
800 << node.name()
801 << " Type: " << node.type()
802 << " Target: " << TargetInfo::TargetType
803 << " Operation: " << func_name
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100804 << " Data Type: " << input1->info()->data_type()
Pablo Tello32521432018-11-15 14:43:10 +0000805 << " Shape: " << input1->info()->tensor_shape()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100806 << std::endl);
807
808 return func;
809}
810
811/** Create a backend flatten layer function
812 *
813 * @tparam FlattenLayerFunction Backend flatten function
814 * @tparam TargetInfo Target-specific information
815 *
816 * @param[in] node Node to create the backend function for
817 *
818 * @return Backend flatten layer function
819 */
820template <typename FlattenLayerFunction, typename TargetInfo>
821std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
822{
823 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
824
825 // Extract IO and info
826 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
827 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
828
Georgios Pinitase2220552018-07-20 13:23:44 +0100829 ARM_COMPUTE_ERROR_ON(input == nullptr);
830 ARM_COMPUTE_ERROR_ON(output == nullptr);
831
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100832 // Create and configure function
833 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
834 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100835
836 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000837 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
838 << node.name()
839 << " Type: " << node.type()
840 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100841 << " Data Type: " << input->info()->data_type()
842 << " Input shape: " << input->info()->tensor_shape()
843 << " Output shape: " << output->info()->tensor_shape()
844 << std::endl);
845
846 return std::move(func);
847}
848
849/** Create a backend fully connected layer function
850 *
851 * @tparam FullyConnectedLayerFunction Backend fully-connected function
852 * @tparam TargetInfo Target-specific information
853 *
854 * @param[in] node Node to create the backend function for
855 * @param[in] ctx Graph context
856 *
857 * @return Backend fully connected layer function
858 */
859template <typename FullyConnectedLayerFunction, typename TargetInfo>
860std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
861{
862 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
863
864 // Extract IO and info
865 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
866 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
867 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
868 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas7d66a8e2018-07-17 12:28:42 +0100869 const FullyConnectedLayerInfo fc_info = node.info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100870
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100871 ARM_COMPUTE_ERROR_ON(input == nullptr);
872 ARM_COMPUTE_ERROR_ON(weights == nullptr);
873 ARM_COMPUTE_ERROR_ON(output == nullptr);
874
Georgios Pinitase2220552018-07-20 13:23:44 +0100875 // Create and configure function
Michalis Spyrou1a569a32019-09-10 17:20:34 +0100876 auto wm = get_weights_manager(ctx, TargetInfo::TargetType);
877 auto mm = get_memory_manager(ctx, TargetInfo::TargetType);
878 auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
Georgios Pinitase2220552018-07-20 13:23:44 +0100879 func->configure(input, weights, biases, output, fc_info);
880
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100881 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
882
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100883 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100884 std::ostringstream qss;
885 if(is_quantized)
886 {
887 qss << " Input QuantInfo: " << input->info()->quantization_info()
888 << " Weights QuantInfo: " << weights->info()->quantization_info()
889 << " Output QuantInfo: " << output->info()->quantization_info();
890 }
Pablo Tello32521432018-11-15 14:43:10 +0000891 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
892 << node.name()
893 << " Type: " << node.type()
894 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100895 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100896 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100897 << " Input shape: " << input->info()->tensor_shape()
898 << " Weights shape: " << weights->info()->tensor_shape()
899 << " Output shape: " << output->info()->tensor_shape()
900 << std::endl);
901
902 return std::move(func);
903}
904
Manuel Bottini5209be52019-02-13 16:34:56 +0000905/** Create a backend generate proposals layer function
906 *
907 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
908 * @tparam TargetInfo Target-specific information
909 *
910 * @param[in] node Node to create the backend function for
911 * @param[in] ctx Graph context
912 *
913 * @return Backend generate proposals layer function
914 */
915template <typename GenerateProposalsLayerFunction, typename TargetInfo>
916std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
917{
918 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
919
920 // Extract IO and info
921 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
922 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
923 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
924 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
925 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
926 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
927 const GenerateProposalsInfo info = node.info();
928
929 ARM_COMPUTE_ERROR_ON(scores == nullptr);
930 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
931 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
932 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
933 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
934
935 // Create and configure function
936 auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
937 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
938
939 // Log info
940 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
941 << " Target " << TargetInfo::TargetType
942 << " Data Type: " << scores->info()->data_type()
943 << " Scores shape: " << scores->info()->tensor_shape()
944 << " Deltas shape: " << deltas->info()->tensor_shape()
945 << " Anchors shape: " << anchors->info()->tensor_shape()
946 << " Proposals shape: " << proposals->info()->tensor_shape()
947 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
948 << " Scores Out shape: " << scores_out->info()->tensor_shape()
949 << std::endl);
950
951 return std::move(func);
952}
953
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100954/** Create a backend normalization layer function
955 *
956 * @tparam NormalizationLayerFunction Backend normalization function
957 * @tparam TargetInfo Target-specific information
958 *
959 * @param[in] node Node to create the backend function for
960 * @param[in] ctx Graph context
961 *
962 * @return Backend normalization layer function
963 */
964template <typename NormalizationLayerFunction, typename TargetInfo>
965std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
966{
967 ARM_COMPUTE_UNUSED(ctx);
968
969 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
970
971 // Extract IO and info
972 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
973 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
974 const NormalizationLayerInfo norm_info = node.normalization_info();
975 ARM_COMPUTE_ERROR_ON(input == nullptr);
976 ARM_COMPUTE_ERROR_ON(output == nullptr);
977
978 // Create and configure function
979 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
980 func->configure(input, output, norm_info);
981
982 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000983 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
984 << node.name()
985 << " Type: " << node.type()
986 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100987 << " Data Type: " << input->info()->data_type()
988 << " Input shape: " << input->info()->tensor_shape()
989 << " Output shape: " << output->info()->tensor_shape()
990 << " Normalization info: " << norm_info.type()
991 << std::endl);
992
993 return std::move(func);
994}
995
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100996/** Create a backend normalize planar YUV layer function
997 *
998 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
999 * @tparam TargetInfo Target-specific information
1000 *
1001 * @param[in] node Node to create the backend function for
1002 *
1003 * @return Backend normalize plnar YUV layer function
1004 */
1005template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
1006std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
1007{
1008 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
1009
1010 // Extract IO and info
1011 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1012 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
1013 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
1014 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1015 ARM_COMPUTE_ERROR_ON(input == nullptr);
1016 ARM_COMPUTE_ERROR_ON(mean == nullptr);
1017 ARM_COMPUTE_ERROR_ON(std == nullptr);
1018 ARM_COMPUTE_ERROR_ON(output == nullptr);
1019
1020 // Create and configure function
1021 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
1022 func->configure(input, output, mean, std);
1023
1024 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001025 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1026 << node.name()
1027 << " Type: " << node.type()
1028 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio555d1102018-09-12 13:51:59 +01001029 << " Data Type: " << input->info()->data_type()
1030 << " Shape: " << input->info()->tensor_shape()
1031 << std::endl);
1032
1033 return std::move(func);
1034}
1035
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001036/** Create a backend pad layer function
1037 *
1038 * @tparam PadLayerFunction Backend pad function
1039 * @tparam TargetInfo Target-specific information
1040 *
1041 * @param[in] node Node to create the backend function for
1042 *
1043 * @return Backend pad layer function
1044 */
1045template <typename PadLayerFunction, typename TargetInfo>
1046std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
1047{
1048 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1049
1050 // Extract IO and info
1051 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1052 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1053 const PaddingList &padding = node.padding();
1054 ARM_COMPUTE_ERROR_ON(input == nullptr);
1055 ARM_COMPUTE_ERROR_ON(output == nullptr);
1056
1057 // Create and configure function
1058 auto func = support::cpp14::make_unique<PadLayerFunction>();
1059 func->configure(input, output, padding);
1060
1061 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001062 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1063 << node.name()
1064 << " Type: " << node.type()
1065 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001066 << " Data Type: " << input->info()->data_type()
1067 << " Input shape: " << input->info()->tensor_shape()
1068 << " Output shape: " << output->info()->tensor_shape()
1069 << std::endl);
1070
1071 return std::move(func);
1072}
1073
Georgios Pinitas57c48242018-08-02 13:41:49 +01001074/** Create a backend permute layer function
1075 *
1076 * @tparam PermuteLayerFunction Backend permute function
1077 * @tparam TargetInfo Target-specific information
1078 *
1079 * @param[in] node Node to create the backend function for
1080 *
1081 * @return Backend permute layer function
1082 */
1083template <typename PermuteLayerFunction, typename TargetInfo>
1084std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1085{
1086 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1087
1088 // Extract IO and info
1089 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1090 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1091 const PermutationVector &perm = node.permutation_vector();
1092 ARM_COMPUTE_ERROR_ON(input == nullptr);
1093 ARM_COMPUTE_ERROR_ON(output == nullptr);
1094
1095 // Create and configure function
1096 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1097 func->configure(input, output, perm);
1098
1099 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001100 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1101 << node.name()
1102 << " Type: " << node.type()
1103 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001104 << " Data Type: " << input->info()->data_type()
1105 << " Input shape: " << input->info()->tensor_shape()
1106 << " Output shape: " << output->info()->tensor_shape()
1107 << " Permutation vector: " << perm
1108 << std::endl);
1109
1110 return std::move(func);
1111}
1112
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001113/** Create a backend pooling layer function
1114 *
1115 * @tparam PoolingLayerFunction Backend pooling function
1116 * @tparam TargetInfo Target-specific information
1117 *
1118 * @param[in] node Node to create the backend function for
1119 *
1120 * @return Backend pooling layer function
1121 */
1122template <typename PoolingLayerFunction, typename TargetInfo>
1123std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1124{
1125 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1126
1127 // Extract IO and info
1128 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1129 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1130 const PoolingLayerInfo pool_info = node.pooling_info();
1131 ARM_COMPUTE_ERROR_ON(input == nullptr);
1132 ARM_COMPUTE_ERROR_ON(output == nullptr);
1133
1134 // Create and configure function
1135 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1136 func->configure(input, output, pool_info);
1137
1138 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001139 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1140 << node.name()
1141 << " Type: " << node.type()
1142 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001143 << " Data Type: " << input->info()->data_type()
1144 << " Input shape: " << input->info()->tensor_shape()
1145 << " Output shape: " << output->info()->tensor_shape()
1146 << " Pooling info: " << pool_info.pool_type()
1147 << std::endl);
1148
1149 return std::move(func);
1150}
1151
Pablo Tello32521432018-11-15 14:43:10 +00001152/** Create a backend priorbox layer function
1153 *
1154 * @tparam PriorBoxLayerFunction Backend priorbox function
1155 * @tparam TargetInfo Target-specific information
1156 *
1157 * @param[in] node Node to create the backend function for
1158 *
1159 * @return Backend priorbox layer function
1160 */
1161template <typename PriorBoxLayerFunction, typename TargetInfo>
1162std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1163{
1164 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1165
1166 // Extract IO and info
1167 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1168 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1169 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1170 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1171 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1172 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1173 ARM_COMPUTE_ERROR_ON(output == nullptr);
1174
1175 // Create and configure function
1176 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1177 func->configure(input0, input1, output, prior_info);
1178
1179 // Log info
1180 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1181 << node.name()
1182 << " Type: " << node.type()
1183 << " Target: " << TargetInfo::TargetType
1184 << " Data Type: " << input0->info()->data_type()
1185 << " Input0 shape: " << input0->info()->tensor_shape()
1186 << " Input1 shape: " << input1->info()->tensor_shape()
1187 << " Output shape: " << output->info()->tensor_shape()
1188 << " PriorBoxLayer info: " << prior_info
1189 << std::endl);
1190
1191 return std::move(func);
1192}
1193
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001194/** Create a backend quantization layer function
1195 *
1196 * @tparam QuantizationLayerFunction Backend quantization function
1197 * @tparam TargetInfo Target-specific information
1198 *
1199 * @param[in] node Node to create the backend function for
1200 *
1201 * @return Backend quantization layer function
1202 */
1203template <typename QuantizationLayerFunction, typename TargetInfo>
1204std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1205{
1206 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1207
1208 // Extract IO and info
1209 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1210 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1211 ARM_COMPUTE_ERROR_ON(input == nullptr);
1212 ARM_COMPUTE_ERROR_ON(output == nullptr);
1213
1214 // Create and configure function
1215 auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1216 func->configure(input, output);
1217
1218 // Log info
1219 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1220 << node.name()
1221 << " Type: " << node.type()
1222 << " Target: " << TargetInfo::TargetType
1223 << " Data Type: " << input->info()->data_type()
1224 << " Input shape: " << input->info()->tensor_shape()
1225 << " Output shape: " << output->info()->tensor_shape()
1226 << std::endl);
1227
1228 return std::move(func);
1229}
1230
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001231/** Create a backend reorg layer function
1232 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001233 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001234 * @tparam TargetInfo Target-specific information
1235 *
1236 * @param[in] node Node to create the backend function for
1237 *
1238 * @return Backend reshape layer function
1239 */
1240template <typename ReorgLayerFunction, typename TargetInfo>
1241std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1242{
1243 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1244
1245 // Extract IO and info
1246 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1247 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1248 ARM_COMPUTE_ERROR_ON(input == nullptr);
1249 ARM_COMPUTE_ERROR_ON(output == nullptr);
1250
1251 // Create and configure function
1252 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1253 func->configure(input, output, node.stride());
1254
1255 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001256 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1257 << node.name()
1258 << " Type: " << node.type()
1259 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001260 << " Data Type: " << input->info()->data_type()
1261 << " Input shape: " << input->info()->tensor_shape()
1262 << " Output shape: " << output->info()->tensor_shape()
1263 << std::endl);
1264
1265 return std::move(func);
1266}
1267
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001268/** Create a backend reshape layer function
1269 *
1270 * @tparam ReshapeLayerFunction Backend reshape function
1271 * @tparam TargetInfo Target-specific information
1272 *
1273 * @param[in] node Node to create the backend function for
1274 *
1275 * @return Backend reshape layer function
1276 */
1277template <typename ReshapeLayerFunction, typename TargetInfo>
1278std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1279{
1280 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1281
1282 // Extract IO and info
1283 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1284 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1285 ARM_COMPUTE_ERROR_ON(input == nullptr);
1286 ARM_COMPUTE_ERROR_ON(output == nullptr);
1287
1288 // Create and configure function
1289 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1290 func->configure(input, output);
1291
1292 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001293 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1294 << node.name()
1295 << " Type: " << node.type()
1296 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001297 << " Data Type: " << input->info()->data_type()
1298 << " Input shape: " << input->info()->tensor_shape()
1299 << " Output shape: " << output->info()->tensor_shape()
1300 << std::endl);
1301
1302 return std::move(func);
1303}
1304
1305/** Create a backend resize layer function
1306 *
1307 * @tparam ResizeLayerFunction Backend resize function
1308 * @tparam TargetInfo Target-specific information
1309 *
1310 * @param[in] node Node to create the backend function for
1311 *
1312 * @return Backend resize layer function
1313 */
1314template <typename ResizeLayerFunction, typename TargetInfo>
1315std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1316{
1317 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1318
1319 // Extract IO and info
1320 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1321 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1322 ARM_COMPUTE_ERROR_ON(input == nullptr);
1323 ARM_COMPUTE_ERROR_ON(output == nullptr);
1324 const InterpolationPolicy policy = node.policy();
1325
1326 // Create and configure function
1327 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1328 func->configure(input, output, policy, BorderMode::CONSTANT);
1329
1330 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001331 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1332 << node.name()
1333 << " Type: " << node.type()
1334 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001335 << " Data Type: " << input->info()->data_type()
1336 << " Input shape: " << input->info()->tensor_shape()
1337 << " Output shape: " << output->info()->tensor_shape()
1338 << " Interpolation: " << policy
1339 << std::endl);
1340
1341 return std::move(func);
1342}
1343
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001344/** Create a backend ROI align layer function
1345 *
1346 * @tparam ROIAlignLayerFunction ROI Align function
1347 * @tparam TargetInfo Target-specific information
1348 *
1349 * @param[in] node Node to create the backend function for
1350 *
1351 * @return ROI Align layer function
1352 */
1353template <typename ROIAlignLayerFunction, typename TargetInfo>
1354std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1355{
1356 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1357
1358 // Extract IO and info
1359 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1360 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1361 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1362 ARM_COMPUTE_ERROR_ON(input == nullptr);
1363 ARM_COMPUTE_ERROR_ON(output == nullptr);
1364 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1365
1366 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1367
1368 // Create and configure function
1369 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1370
1371 func->configure(input, rois, output, pool_info);
1372
1373 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001374 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1375 << node.name()
1376 << " Type: " << node.type()
1377 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001378 << " Data Type: " << input->info()->data_type()
1379 << " Input shape: " << input->info()->tensor_shape()
1380 << " Output shape: " << output->info()->tensor_shape()
1381 << " ROIs shape: " << rois->info()->tensor_shape()
1382 << " ROIPooling width: " << pool_info.pooled_width()
1383 << " ROIPooling height: " << pool_info.pooled_height()
1384 << std::endl);
1385
1386 return std::move(func);
1387}
1388
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001389/** Create a backend slice layer function
1390 *
1391 * @tparam SliceLayerFunction Backend slice function
1392 * @tparam TargetInfo Target-specific information
1393 *
1394 * @param[in] node Node to create the backend function for
1395 *
1396 * @return Backend slice layer function
1397 */
1398template <typename SliceLayerFunction, typename TargetInfo>
1399std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1400{
1401 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1402
1403 // Extract IO and info
1404 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1405 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1406 ARM_COMPUTE_ERROR_ON(input == nullptr);
1407 ARM_COMPUTE_ERROR_ON(output == nullptr);
1408
1409 // Create and configure function
1410 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1411 func->configure(input, output, node.starts(), node.ends());
1412
1413 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001414 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1415 << node.name()
1416 << " Type: " << node.type()
1417 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001418 << " Data Type: " << input->info()->data_type()
1419 << " Input shape: " << input->info()->tensor_shape()
1420 << " Output shape: " << output->info()->tensor_shape()
1421 << std::endl);
1422
1423 return std::move(func);
1424}
1425
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001426/** Create a backend softmax layer function
1427 *
1428 * @tparam SoftmaxLayerFunction Backend softmax function
1429 * @tparam TargetInfo Target-specific information
1430 *
1431 * @param[in] node Node to create the backend function for
1432 * @param[in] ctx Graph context
1433 *
1434 * @return Backend softmax layer function
1435 */
1436template <typename SoftmaxLayerFunction, typename TargetInfo>
1437std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1438{
1439 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1440
1441 // Extract IO and info
1442 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1443 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1444 const float beta = node.beta();
1445 ARM_COMPUTE_ERROR_ON(input == nullptr);
1446 ARM_COMPUTE_ERROR_ON(output == nullptr);
1447
1448 // Create and configure function
1449 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1450 func->configure(input, output, beta);
1451
1452 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001453 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1454 << node.name()
1455 << " Type: " << node.type()
1456 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001457 << " Data Type: " << input->info()->data_type()
1458 << " Input shape: " << input->info()->tensor_shape()
1459 << " Output shape: " << output->info()->tensor_shape()
1460 << std::endl);
1461
1462 return std::move(func);
1463}
Michele Di Giorgioec699752019-03-22 15:25:32 +00001464
1465/** Create a backend layer stack function
1466 *
1467 * @tparam StackLayerFunction Backend stack function
1468 * @tparam TargetInfo Target-specific information
1469 *
1470 * @param[in] node Node to create the backend function for
1471 *
1472 * @return Backend stack layer function
1473 */
1474template <typename StackLayerFunction, typename TargetInfo>
1475std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1476{
1477 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1478 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1479
1480 // Extract IO and info
1481 std::vector<typename TargetInfo::TensorType *> inputs;
1482 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1483 {
1484 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1485 }
1486 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1487 const int axis = node.axis();
1488
1489 // Create and configure function
1490 auto func = support::cpp14::make_unique<StackLayerFunction>();
1491 func->configure(inputs, axis, output);
1492
1493 // Log info
1494 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1495 << node.name()
1496 << " Type: " << node.type()
1497 << " Target: " << TargetInfo::TargetType
1498 << " Data Type: " << output->info()->data_type()
1499 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1500 << " Output shape: " << output->info()->tensor_shape()
1501 << " Num Inputs: " << inputs.size()
1502 << " Axis: " << axis
1503 << std::endl);
1504
1505 return std::move(func);
1506}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001507/** Create a backend Upsample layer function
1508 *
1509 * @tparam UpsampleLayerFunction Backend Upsample function
1510 * @tparam TargetInfo Target-specific information
1511 *
1512 * @param[in] node Node to create the backend function for
1513 * @param[in] ctx Graph context
1514 *
1515 * @return Backend Upsample layer function
1516 */
1517template <typename UpsampleLayerFunction, typename TargetInfo>
1518std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1519{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001520 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001521 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1522
1523 // Extract IO and info
1524 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1525 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1526 const Size2D info = node.info();
1527 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1528 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1529 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1530 ARM_COMPUTE_ERROR_ON(input == nullptr);
1531 ARM_COMPUTE_ERROR_ON(output == nullptr);
1532
1533 // Create and configure function
1534 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1535 func->configure(input, output, info, upsampling_policy);
1536
1537 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001538 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1539 << node.name()
1540 << " Type: " << node.type()
1541 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001542 << " Data Type: " << input->info()->data_type()
1543 << " Input shape: " << input->info()->tensor_shape()
1544 << " Output shape: " << output->info()->tensor_shape()
1545 << " Strides: " << info
1546 << " Upsampling policy: " << upsampling_policy
1547 << std::endl);
1548
1549 return std::move(func);
1550}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001551/** Create a backend YOLO layer function
1552 *
1553 * @tparam YoloLayerFunction Backend YOLO function
1554 * @tparam TargetInfo Target-specific information
1555 *
1556 * @param[in] node Node to create the backend function for
1557 * @param[in] ctx Graph context
1558 *
1559 * @return Backend YOLO layer function
1560 */
1561template <typename YOLOlayerFunction, typename TargetInfo>
1562std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1563{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001564 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001565 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1566
1567 // Extract IO and info
1568 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1569 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1570 const ActivationLayerInfo act_info = node.activation_info();
1571 const int32_t num_classes = node.num_classes();
1572 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1573 ARM_COMPUTE_ERROR_ON(input == nullptr);
1574 ARM_COMPUTE_ERROR_ON(output == nullptr);
1575
1576 // Create and configure function
1577 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1578 func->configure(input, output, act_info, num_classes);
1579
1580 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001581 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1582 << node.name()
1583 << " Type: " << node.type()
1584 << " Target: " << TargetInfo::TargetType
Michalis Spyrou96f67692018-09-13 11:39:28 +01001585 << " Data Type: " << input->info()->data_type()
1586 << " Input shape: " << input->info()->tensor_shape()
1587 << " Output shape: " << output->info()->tensor_shape()
1588 << " Activation function: " << act_info.activation()
1589 << " Num classes: " << num_classes
1590 << std::endl);
1591
1592 return std::move(func);
1593}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001594} // namespace detail
1595} // namespace backends
1596} // namespace graph
1597} // namespace arm_compute
1598
1599#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */