/*
 * Copyright (c) 2018-2019 ARM Limited.
Georgios Pinitasda2491f2018-06-01 17:49:09 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
Georgios Pinitas9e4824c2019-04-12 13:15:58 +010031#include "arm_compute/graph/Utils.h"
giuros01acce5042019-02-21 17:32:34 +000032#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
Manuel Bottinibffb41e2019-06-20 16:00:27 +010033#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
Georgios Pinitasda2491f2018-06-01 17:49:09 +010034#include "arm_compute/graph/backends/Utils.h"
35#include "arm_compute/graph/nodes/Nodes.h"
36
37#include "arm_compute/core/Error.h"
38#include "arm_compute/core/Helpers.h"
39#include "arm_compute/core/ITensorInfo.h"
40#include "arm_compute/core/utils/misc/Cast.h"
41
42namespace arm_compute
43{
44namespace graph
45{
46namespace backends
47{
48namespace detail
49{
50/** Returns backing tensor of a given tensor
51 *
52 * @tparam TargetInfo Target information
53 *
54 * @param[in] tensor Tensor to extract the backing tensor from
55 *
56 * @return Backing tensor if present else nullptr
57 */
58template <typename TargetInfo>
59typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
60{
61 typename TargetInfo::TensorType *backing_tensor = nullptr;
62 if(tensor != nullptr)
63 {
64 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
65 // Get backing tensor handle
66 ITensorHandle *tensor_handle = tensor->handle();
67 // Get backing tensor
68 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
69 }
70
71 return backing_tensor;
72}
73
74template <typename TargetInfo>
75void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
76{
77 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
Pablo Tello32521432018-11-15 14:43:10 +000078 << " Target: " << TargetInfo::TargetType
79 << " ID: " << node.id()
80 << node.name()
Georgios Pinitasda2491f2018-06-01 17:49:09 +010081 << std::endl);
82
83 ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
84 ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
85 ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
86}
87
88/** Creates a backend activation layer function
89 *
90 * @tparam ActivationLayerFunction Backend activation function
91 * @tparam TargetInfo Target-specific information
92 *
93 * @param[in] node Node to create the backend function for
94 *
95 * @return Backend activation layer function
96 */
97template <typename ActivationLayerFunction, typename TargetInfo>
98std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
99{
100 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
101
102 // Extract IO and info
103 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
104 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
105 const ActivationLayerInfo act_info = node.activation_info();
106
107 // Create function
108 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
109 func->configure(input, output, act_info);
110
Pablo Tello32521432018-11-15 14:43:10 +0000111 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
112 << node.name()
113 << " Type: " << node.type()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000114 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100115 << " Data Type: " << input->info()->data_type()
116 << " Shape: " << input->info()->tensor_shape()
117 << " Activation function: " << act_info.activation()
118 << " a: " << act_info.a()
119 << " b: " << act_info.b()
120 << " InPlace : " << is_in_place_operation(input, output)
121 << std::endl);
122
123 return std::move(func);
124}
125
126/** Create a backend batch normalization layer function
127 *
128 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
129 * @tparam TargetInfo Target-specific information
130 *
131 * @param[in] node Node to create the backend function for
132 *
133 * @return Backend batch normalization layer function
134 */
135template <typename BatchNormalizationLayerFunction, typename TargetInfo>
136std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
137{
138 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
139
140 // Extract IO and info
giuros01acce5042019-02-21 17:32:34 +0000141 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
142 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
143 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
144 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
145 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
146
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100147 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
148 const float epsilon = node.epsilon();
149 const ActivationLayerInfo fused_act = node.fused_activation();
150
151 // Create and configure function
152 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
153 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
154
155 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000156 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
157 << node.name()
158 << " Type: " << node.type()
159 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100160 << " Data Type: " << input->info()->data_type()
161 << " Shape: " << input->info()->tensor_shape()
162 << " Epsilon: " << epsilon << " "
163 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
Pablo Tello32521432018-11-15 14:43:10 +0000164 << " InPlace: " << is_in_place_operation(input, output)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100165 << std::endl);
166
167 return std::move(func);
168}
169
giuros01acce5042019-02-21 17:32:34 +0000170/** Create a backend batch normalization layer function
171 *
172 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
173 * @tparam TargetInfo Target-specific information
174 *
175 * @param[in] node Node to create the backend function for
176 *
177 * @return Backend batch normalization layer function
178 */
179template <typename FusedLayerTypes, typename TargetInfo>
180std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node)
181{
182 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
183
184 // Extract IO and info
185 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
186 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
187 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
188 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
189 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
190 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
191 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
192
193 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
194
195 const PadStrideInfo conv_info = node.convolution_info();
196 const unsigned int num_groups = node.num_groups();
197 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
198 const ActivationLayerInfo fused_act = node.fused_activation();
199 const float epsilon = node.epsilon();
200
giuros01acce5042019-02-21 17:32:34 +0000201 // Create and configure function
202 auto func = support::cpp14::make_unique<FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>>();
203 func->configure(input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);
204
205 // Log info
206 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
207 << node.name()
Manuel Bottinibffb41e2019-06-20 16:00:27 +0100208 << " Type: " << node.type()
209 << " Target: " << TargetInfo::TargetType
210 << " Data Type: " << input->info()->data_type()
211 << " Input shape: " << input->info()->tensor_shape()
212 << " Weights shape: " << weights->info()->tensor_shape()
213 << " Output shape: " << output->info()->tensor_shape()
214 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
215 << std::endl);
216 return std::move(func);
217}
218
219/** Create a backend fused depthwise convolution batch normalization layer function
220 *
221 * @tparam FusedLayerTypes Fused layer types
222 * @tparam TargetInfo Target-specific information
223 *
224 * @param[in] node Node to create the backend function for
225 *
226 * @return Backend fused depthwise convolution batch normalization layer function
227 */
228template <typename FusedLayerTypes, typename TargetInfo>
229std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node)
230{
231 validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);
232
233 // Extract IO and info
234 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
235 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
236 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
237 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(3));
238 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(4));
239 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(5));
240 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(6));
241
242 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
243
244 const PadStrideInfo conv_info = node.convolution_info();
245 const unsigned int depth_multiplier = node.depth_multiplier();
246 const ActivationLayerInfo fused_act = node.fused_activation();
247 const float epsilon = node.epsilon();
248
249 // Create and configure function
250 auto func = support::cpp14::make_unique<FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>>();
251 func->configure(input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);
252
253 // Log info
254 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
255 << node.name()
256 << " Type: " << node.type()
giuros01acce5042019-02-21 17:32:34 +0000257 << " Target: " << TargetInfo::TargetType
258 << " Data Type: " << input->info()->data_type()
259 << " Input shape: " << input->info()->tensor_shape()
260 << " Weights shape: " << weights->info()->tensor_shape()
261 << " Output shape: " << output->info()->tensor_shape()
262 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
263 << std::endl);
264 return std::move(func);
265}
266
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100267/** Create a backend bounding box transform layer function
268 *
269 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
270 * @tparam TargetInfo Target-specific information
271 *
272 * @param[in] node Node to create the backend function for
273 *
274 * @return Backend bounding box transform layer function
275 */
276template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
277std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
278{
279 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
280
281 // Extract IO and info
282 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
283 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
284 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
285 const BoundingBoxTransformInfo bbox_info = node.info();
286
287 // Create and configure function
288 auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
289 func->configure(input, output, deltas, bbox_info);
290
291 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000292 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
293 << node.name()
294 << " Type: " << node.type()
295 << " Target: " << TargetInfo::TargetType
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100296 << " Data Type: " << input->info()->data_type()
297 << " Shape: " << input->info()->tensor_shape()
298 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
299 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
300 << std::endl);
301
302 return std::move(func);
303}
304
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100305/** Create a backend channel shuffle layer function
306 *
307 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
308 * @tparam TargetInfo Target-specific information
309 *
310 * @param[in] node Node to create the backend function for
311 *
312 * @return Backend channel shuffle layer function
313 */
314template <typename ChannelShuffleLayerFunction, typename TargetInfo>
315std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
316{
317 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
318
319 // Extract IO and info
320 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
321 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
322 const unsigned int num_groups = node.num_groups();
323
324 // Create function
325 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
326 func->configure(input, output, num_groups);
327
Pablo Tello32521432018-11-15 14:43:10 +0000328 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
329 << node.name()
330 << " Type: " << node.type()
331 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100332 << " Data Type: " << input->info()->data_type()
333 << " Shape: " << input->info()->tensor_shape()
334 << " Num groups: " << num_groups
335 << std::endl);
336
337 return std::move(func);
338}
339
Georgios Pinitase2220552018-07-20 13:23:44 +0100340/** Create a backend layer concatenate function
341 *
342 * @tparam ConcatenateLayerFunction Backend concatenate function
343 * @tparam TargetInfo Target-specific information
344 *
345 * @param[in] node Node to create the backend function for
346 *
347 * @return Backend concatenate layer function
348 */
349template <typename ConcatenateLayerFunction, typename TargetInfo>
350std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
351{
352 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
353 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
354
355 // Return nullptr if depth concatenate is switched off
356 if(!node.is_enabled())
357 {
358 return nullptr;
359 }
360
361 // Extract IO and info
362 std::vector<typename TargetInfo::TensorType *> inputs;
363 for(unsigned int i = 0; i < node.num_inputs(); ++i)
364 {
365 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
366 }
367 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas9e4824c2019-04-12 13:15:58 +0100368 const DataLayout data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
369 const size_t concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());
Georgios Pinitase2220552018-07-20 13:23:44 +0100370
371 // Create and configure function
372 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
373 func->configure(inputs, output, concat_axis);
374
375 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000376 const bool is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
377 std::ostringstream qss;
378 if(is_quantized)
379 {
380 qss << " Output QuantInfo: " << output->info()->quantization_info();
381 }
Pablo Tello32521432018-11-15 14:43:10 +0000382 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
383 << node.name()
384 << " Type: " << node.type()
385 << " Target: " << TargetInfo::TargetType
Georgios Pinitase2220552018-07-20 13:23:44 +0100386 << " Data Type: " << output->info()->data_type()
387 << " Shape: " << output->info()->tensor_shape()
388 << " Num Inputs: " << inputs.size()
389 << " Axis: " << concat_axis
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000390 << qss.str()
Georgios Pinitase2220552018-07-20 13:23:44 +0100391 << std::endl);
392
393 return std::move(func);
394}
395
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100396/** Create a backend convolution layer function
397 *
398 * @tparam ConvolutionLayerFunctions Backend convolution functions
399 * @tparam TargetInfo Target-specific information
400 *
401 * @param[in] node Node to create the backend function for
402 * @param[in] ctx Graph context
403 *
404 * @return Backend convolution layer function
405 */
406template <typename ConvolutionLayerFunctions, typename TargetInfo>
407std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
408{
409 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
410
411 // Extract IO and info
412 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
413 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
414 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
415 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
416
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100417 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
418
419 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100420 {
421 biases->info()->set_data_type(DataType::S32);
422 }
423
Georgios Pinitas08346e92018-10-16 19:10:46 +0100424 const PadStrideInfo conv_info = node.convolution_info();
425 const unsigned int num_groups = node.num_groups();
426 const ConvolutionMethod conv_algorithm = node.convolution_method();
427 const bool fast_math = node.fast_math_hint() == FastMathHint::Enabled;
428 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100429
430 // Create and configure function (we assume that functions have been validated before creation)
431 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
432 std::unique_ptr<IFunction> func;
433 std::string func_name;
434
Georgios Pinitase2220552018-07-20 13:23:44 +0100435 if(conv_algorithm == ConvolutionMethod::Winograd)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100436 {
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100437 ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100438 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
439 std::string("WinogradConvolutionLayer"), mm,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100440 input, weights, biases, output, conv_info, fused_act, fast_math);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100441 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100442 else if(conv_algorithm == ConvolutionMethod::Direct)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100443 {
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100444 ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100445 std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
446 std::string("DirectConvolutionLayer"),
Georgios Pinitas08346e92018-10-16 19:10:46 +0100447 input, weights, biases, output, conv_info, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100448 }
449 else if(conv_algorithm == ConvolutionMethod::GEMM)
450 {
451 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
452 std::string("GEMMConvolutionLayer"), mm,
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100453 input, weights, biases, output, conv_info,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100454 WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100455 }
456 else
457 {
458 std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
459 std::string("GenericConvolutionLayer"), mm,
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100460 input, weights, biases, output, conv_info,
Georgios Pinitas08346e92018-10-16 19:10:46 +0100461 WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100462 }
463
464 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100465 std::ostringstream qss;
466 if(is_quantized)
467 {
468 qss << " Input QuantInfo: " << input->info()->quantization_info()
469 << " Weights QuantInfo: " << weights->info()->quantization_info()
470 << " Output QuantInfo: " << output->info()->quantization_info();
471 }
Pablo Tello32521432018-11-15 14:43:10 +0000472 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
473 << node.name()
474 << " Type: " << func_name
475 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100476 << " Data Type: " << input->info()->data_type()
Georgios Pinitas2a2db592018-08-15 12:14:46 +0100477 << " Groups: " << num_groups
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100478 << " Input shape: " << input->info()->tensor_shape()
479 << " Weights shape: " << weights->info()->tensor_shape()
480 << " Output shape: " << output->info()->tensor_shape()
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000481 << qss.str()
Georgios Pinitas08346e92018-10-16 19:10:46 +0100482 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100483 << std::endl);
484 return func;
485}
486
487/** Create a backend deconvolution layer function
488 *
489 * @tparam DeconvolutionLayerFunction Backend deconvolution function
490 * @tparam TargetInfo Target-specific information
491 *
492 * @param[in] node Node to create the backend function for
493 * @param[in] ctx Graph context
494 *
495 * @return Backend deconvolution layer function
496 */
497template <typename DeconvolutionLayerFunction, typename TargetInfo>
498std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
499{
500 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
501
502 // Extract IO and info
503 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
504 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
505 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
506 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
507
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100508 const PadStrideInfo deconv_info = node.deconvolution_info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100509
510 // Create and configure function (we assume that functions have been validated before creation)
511 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
512 std::unique_ptr<IFunction> func;
513
514 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
515 std::string(), mm,
Manuel Bottinic1b76fa2019-06-17 12:04:40 +0100516 input, weights, biases, output, deconv_info);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100517
518 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000519 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
520 << node.name()
521 << " Type: " << node.type()
522 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100523 << " Data Type: " << input->info()->data_type()
524 << " Input shape: " << input->info()->tensor_shape()
525 << " Weights shape: " << weights->info()->tensor_shape()
526 << " Output shape: " << output->info()->tensor_shape()
527 << std::endl);
528 return func;
529}
530
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100531/** Create a backend layer depth-wise convolution function
532 *
533 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
534 * @tparam TargetInfo Target-specific information
535 *
536 * @param[in] node Node to create the backend function for
537 *
538 * @return Backend depth-wise convolution layer function
539 */
540template <typename DepthwiseConvolutionLayerFunctions, typename TargetInfo>
541std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
542{
543 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
544
545 // Extract IO and info
546 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
547 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
548 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
549 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
550
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100551 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
552
553 if(is_quantized)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100554 {
555 biases->info()->set_data_type(DataType::S32);
556 }
557
Georgios Pinitas60e98252018-10-22 16:17:20 +0100558 const PadStrideInfo conv_info = node.convolution_info();
559 const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
Georgios Pinitas05045c12018-12-07 18:31:47 +0000560 const unsigned int depth_multiplier = node.depth_multiplier();
Georgios Pinitas60e98252018-10-22 16:17:20 +0100561 const ActivationLayerInfo fused_act = node.fused_activation();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100562
563 // Create and configure function (we assume that functions have been validated before creation)
564 std::unique_ptr<IFunction> func;
565 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100566 if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100567 {
Georgios Pinitas30271c72019-06-24 14:56:34 +0100568 std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::OptimizedDepthwiseConvolutionLayer>(
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100569 std::string("DepthwiseConvolutionLayer3x3"),
Georgios Pinitas60e98252018-10-22 16:17:20 +0100570 input, weights, biases, output, conv_info, depth_multiplier, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100571 }
572 else
573 {
574 std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::GenericDepthwiseConvolutionLayer>(
575 std::string("DepthwiseConvolutionLayer"),
Georgios Pinitas60e98252018-10-22 16:17:20 +0100576 input, weights, biases, output, conv_info, depth_multiplier, fused_act);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100577 }
578
579 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100580 std::ostringstream qss;
581 if(is_quantized)
582 {
583 qss << " Input QuantInfo: " << input->info()->quantization_info()
584 << " Weights QuantInfo: " << weights->info()->quantization_info()
585 << " Output QuantInfo: " << output->info()->quantization_info();
586 }
Pablo Tello32521432018-11-15 14:43:10 +0000587 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
588 << node.name()
589 << " Type: " << func_name
590 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100591 << " Data Type: " << input->info()->data_type()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100592 << " Input shape: " << input->info()->tensor_shape()
593 << " Weights shape: " << weights->info()->tensor_shape()
594 << " Output shape: " << output->info()->tensor_shape()
Georgios Pinitas05045c12018-12-07 18:31:47 +0000595 << " Depth multiplier: " << depth_multiplier
Isabella Gottardi0ae5de92019-03-14 10:32:11 +0000596 << qss.str()
Georgios Pinitas60e98252018-10-22 16:17:20 +0100597 << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100598 << std::endl);
599 return func;
600}
601
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000602/** Create a backend detection output layer function
603 *
604 * @tparam DetectionOutputLayer Function Backend detection output function
605 * @tparam TargetInfo Target-specific information
606 *
607 * @param[in] node Node to create the backend function for
608 *
609 * @return Backend detection output layer function
610 */
611template <typename DetectionOutputLayerFunction, typename TargetInfo>
612std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
613{
614 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
615
616 // Extract IO and info
617 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
618 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
619 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
620 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
621 const DetectionOutputLayerInfo detect_info = node.detection_output_info();
622
623 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
624 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
625 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
626 ARM_COMPUTE_ERROR_ON(output == nullptr);
627
628 // Create and configure function
629 auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
630 func->configure(input0, input1, input2, output, detect_info);
631
632 // Log info
633 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
634 << node.name()
635 << " Type: " << node.type()
636 << " Target: " << TargetInfo::TargetType
637 << " Data Type: " << input0->info()->data_type()
638 << " Input0 shape: " << input0->info()->tensor_shape()
639 << " Input1 shape: " << input1->info()->tensor_shape()
640 << " Input2 shape: " << input2->info()->tensor_shape()
641 << " Output shape: " << output->info()->tensor_shape()
642 << " DetectionOutputLayer info: " << detect_info
643 << std::endl);
644
645 return std::move(func);
646}
Isabella Gottardia7acb3c2019-01-08 13:48:44 +0000647
648/** Create a backend detection post process layer function
649 *
650 * @tparam DetectionPostProcessLayerFunction Backend detection output function
651 * @tparam TargetInfo Target-specific information
652 *
653 * @param[in] node Node to create the backend function for
654 *
655 * @return Backend detection post process layer function
656 */
657template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
658std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
659{
660 validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);
661
662 // Extract IO and info
663 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
664 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
665 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
666 typename TargetInfo::TensorType *output0 = get_backing_tensor<TargetInfo>(node.output(0));
667 typename TargetInfo::TensorType *output1 = get_backing_tensor<TargetInfo>(node.output(1));
668 typename TargetInfo::TensorType *output2 = get_backing_tensor<TargetInfo>(node.output(2));
669 typename TargetInfo::TensorType *output3 = get_backing_tensor<TargetInfo>(node.output(3));
670 const DetectionPostProcessLayerInfo detect_info = node.detection_post_process_info();
671
672 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
673 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
674 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
675 ARM_COMPUTE_ERROR_ON(output0 == nullptr);
676 ARM_COMPUTE_ERROR_ON(output1 == nullptr);
677 ARM_COMPUTE_ERROR_ON(output2 == nullptr);
678 ARM_COMPUTE_ERROR_ON(output3 == nullptr);
679
680 // Create and configure function
681 auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
682 func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);
683
684 // Log info
685 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
686 << node.name()
687 << " Type: " << node.type()
688 << " Target: " << TargetInfo::TargetType
689 << " Data Type: " << input0->info()->data_type()
690 << " Input0 shape: " << input0->info()->tensor_shape()
691 << " Input1 shape: " << input1->info()->tensor_shape()
692 << " Input2 shape: " << input2->info()->tensor_shape()
693 << " Output0 shape: " << output0->info()->tensor_shape()
694 << " Output1 shape: " << output1->info()->tensor_shape()
695 << " Output2 shape: " << output2->info()->tensor_shape()
696 << " Output3 shape: " << output3->info()->tensor_shape()
697 << " DetectionPostProcessLayer info: " << detect_info
698 << std::endl);
699
700 return std::move(func);
701}
702
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100703/** Create a backend element-wise operation layer function
704 *
705 * @tparam EltwiseFunctions Backend element-wise function
706 * @tparam TargetInfo Target-specific information
707 *
708 * @param[in] node Node to create the backend function for
709 *
710 * @return Backend element-wise operation layer function
711 */
712template <typename EltwiseFunctions, typename TargetInfo>
713std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
714{
715 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
716
717 // Extract IO and info
718 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
719 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
720 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
721 const EltwiseOperation eltwise_op = node.eltwise_operation();
722 const ConvertPolicy convert_policy = node.convert_policy();
723 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
724 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
725 ARM_COMPUTE_ERROR_ON(output == nullptr);
726
727 std::unique_ptr<IFunction> func = nullptr;
728 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100729 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100730 {
731 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
732 std::string("ArithmeticAddition"),
733 input1, input2, output, convert_policy);
734 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100735 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100736 {
737 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
738 std::string("ArithmeticSubtraction"),
739 input1, input2, output, convert_policy);
740 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100741 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100742 {
743 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
744 std::string("PixelWiseMultiplication"),
745 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
746 }
747 else
748 {
749 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
750 }
751
752 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000753 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
754 << node.name()
755 << " Type: " << node.type()
756 << " Target: " << TargetInfo::TargetType
757 << " Operation: " << func_name
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100758 << " Data Type: " << input1->info()->data_type()
Pablo Tello32521432018-11-15 14:43:10 +0000759 << " Shape: " << input1->info()->tensor_shape()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100760 << std::endl);
761
762 return func;
763}
764
765/** Create a backend flatten layer function
766 *
767 * @tparam FlattenLayerFunction Backend flatten function
768 * @tparam TargetInfo Target-specific information
769 *
770 * @param[in] node Node to create the backend function for
771 *
772 * @return Backend flatten layer function
773 */
774template <typename FlattenLayerFunction, typename TargetInfo>
775std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
776{
777 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
778
779 // Extract IO and info
780 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
781 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
782
Georgios Pinitase2220552018-07-20 13:23:44 +0100783 ARM_COMPUTE_ERROR_ON(input == nullptr);
784 ARM_COMPUTE_ERROR_ON(output == nullptr);
785
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100786 // Create and configure function
787 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
788 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100789
790 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000791 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
792 << node.name()
793 << " Type: " << node.type()
794 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100795 << " Data Type: " << input->info()->data_type()
796 << " Input shape: " << input->info()->tensor_shape()
797 << " Output shape: " << output->info()->tensor_shape()
798 << std::endl);
799
800 return std::move(func);
801}
802
803/** Create a backend fully connected layer function
804 *
805 * @tparam FullyConnectedLayerFunction Backend fully-connected function
806 * @tparam TargetInfo Target-specific information
807 *
808 * @param[in] node Node to create the backend function for
809 * @param[in] ctx Graph context
810 *
811 * @return Backend fully connected layer function
812 */
813template <typename FullyConnectedLayerFunction, typename TargetInfo>
814std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
815{
816 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
817
818 // Extract IO and info
819 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
820 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
821 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
822 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas7d66a8e2018-07-17 12:28:42 +0100823 const FullyConnectedLayerInfo fc_info = node.info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100824
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100825 ARM_COMPUTE_ERROR_ON(input == nullptr);
826 ARM_COMPUTE_ERROR_ON(weights == nullptr);
827 ARM_COMPUTE_ERROR_ON(output == nullptr);
828
Georgios Pinitase2220552018-07-20 13:23:44 +0100829 // Create and configure function
830 auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
831 func->configure(input, weights, biases, output, fc_info);
832
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100833 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
834
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100835 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100836 std::ostringstream qss;
837 if(is_quantized)
838 {
839 qss << " Input QuantInfo: " << input->info()->quantization_info()
840 << " Weights QuantInfo: " << weights->info()->quantization_info()
841 << " Output QuantInfo: " << output->info()->quantization_info();
842 }
Pablo Tello32521432018-11-15 14:43:10 +0000843 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
844 << node.name()
845 << " Type: " << node.type()
846 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100847 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100848 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100849 << " Input shape: " << input->info()->tensor_shape()
850 << " Weights shape: " << weights->info()->tensor_shape()
851 << " Output shape: " << output->info()->tensor_shape()
852 << std::endl);
853
854 return std::move(func);
855}
856
Manuel Bottini5209be52019-02-13 16:34:56 +0000857/** Create a backend generate proposals layer function
858 *
859 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
860 * @tparam TargetInfo Target-specific information
861 *
862 * @param[in] node Node to create the backend function for
863 * @param[in] ctx Graph context
864 *
865 * @return Backend generate proposals layer function
866 */
867template <typename GenerateProposalsLayerFunction, typename TargetInfo>
868std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
869{
870 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
871
872 // Extract IO and info
873 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
874 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
875 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
876 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
877 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
878 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
879 const GenerateProposalsInfo info = node.info();
880
881 ARM_COMPUTE_ERROR_ON(scores == nullptr);
882 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
883 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
884 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
885 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
886
887 // Create and configure function
888 auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
889 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
890
891 // Log info
892 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
893 << " Target " << TargetInfo::TargetType
894 << " Data Type: " << scores->info()->data_type()
895 << " Scores shape: " << scores->info()->tensor_shape()
896 << " Deltas shape: " << deltas->info()->tensor_shape()
897 << " Anchors shape: " << anchors->info()->tensor_shape()
898 << " Proposals shape: " << proposals->info()->tensor_shape()
899 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
900 << " Scores Out shape: " << scores_out->info()->tensor_shape()
901 << std::endl);
902
903 return std::move(func);
904}
905
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100906/** Create a backend normalization layer function
907 *
908 * @tparam NormalizationLayerFunction Backend normalization function
909 * @tparam TargetInfo Target-specific information
910 *
911 * @param[in] node Node to create the backend function for
912 * @param[in] ctx Graph context
913 *
914 * @return Backend normalization layer function
915 */
916template <typename NormalizationLayerFunction, typename TargetInfo>
917std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
918{
919 ARM_COMPUTE_UNUSED(ctx);
920
921 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
922
923 // Extract IO and info
924 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
925 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
926 const NormalizationLayerInfo norm_info = node.normalization_info();
927 ARM_COMPUTE_ERROR_ON(input == nullptr);
928 ARM_COMPUTE_ERROR_ON(output == nullptr);
929
930 // Create and configure function
931 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
932 func->configure(input, output, norm_info);
933
934 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000935 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
936 << node.name()
937 << " Type: " << node.type()
938 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100939 << " Data Type: " << input->info()->data_type()
940 << " Input shape: " << input->info()->tensor_shape()
941 << " Output shape: " << output->info()->tensor_shape()
942 << " Normalization info: " << norm_info.type()
943 << std::endl);
944
945 return std::move(func);
946}
947
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100948/** Create a backend normalize planar YUV layer function
949 *
950 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
951 * @tparam TargetInfo Target-specific information
952 *
953 * @param[in] node Node to create the backend function for
954 *
955 * @return Backend normalize plnar YUV layer function
956 */
957template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
958std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
959{
960 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
961
962 // Extract IO and info
963 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
964 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
965 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
966 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
967 ARM_COMPUTE_ERROR_ON(input == nullptr);
968 ARM_COMPUTE_ERROR_ON(mean == nullptr);
969 ARM_COMPUTE_ERROR_ON(std == nullptr);
970 ARM_COMPUTE_ERROR_ON(output == nullptr);
971
972 // Create and configure function
973 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
974 func->configure(input, output, mean, std);
975
976 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000977 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
978 << node.name()
979 << " Type: " << node.type()
980 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100981 << " Data Type: " << input->info()->data_type()
982 << " Shape: " << input->info()->tensor_shape()
983 << std::endl);
984
985 return std::move(func);
986}
987
Michele Di Giorgio4bb17332018-09-26 13:56:51 +0100988/** Create a backend pad layer function
989 *
990 * @tparam PadLayerFunction Backend pad function
991 * @tparam TargetInfo Target-specific information
992 *
993 * @param[in] node Node to create the backend function for
994 *
995 * @return Backend pad layer function
996 */
997template <typename PadLayerFunction, typename TargetInfo>
998std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
999{
1000 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1001
1002 // Extract IO and info
1003 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1004 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1005 const PaddingList &padding = node.padding();
1006 ARM_COMPUTE_ERROR_ON(input == nullptr);
1007 ARM_COMPUTE_ERROR_ON(output == nullptr);
1008
1009 // Create and configure function
1010 auto func = support::cpp14::make_unique<PadLayerFunction>();
1011 func->configure(input, output, padding);
1012
1013 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001014 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1015 << node.name()
1016 << " Type: " << node.type()
1017 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +01001018 << " Data Type: " << input->info()->data_type()
1019 << " Input shape: " << input->info()->tensor_shape()
1020 << " Output shape: " << output->info()->tensor_shape()
1021 << std::endl);
1022
1023 return std::move(func);
1024}
1025
Georgios Pinitas57c48242018-08-02 13:41:49 +01001026/** Create a backend permute layer function
1027 *
1028 * @tparam PermuteLayerFunction Backend permute function
1029 * @tparam TargetInfo Target-specific information
1030 *
1031 * @param[in] node Node to create the backend function for
1032 *
1033 * @return Backend permute layer function
1034 */
1035template <typename PermuteLayerFunction, typename TargetInfo>
1036std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1037{
1038 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1039
1040 // Extract IO and info
1041 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1042 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1043 const PermutationVector &perm = node.permutation_vector();
1044 ARM_COMPUTE_ERROR_ON(input == nullptr);
1045 ARM_COMPUTE_ERROR_ON(output == nullptr);
1046
1047 // Create and configure function
1048 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1049 func->configure(input, output, perm);
1050
1051 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001052 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1053 << node.name()
1054 << " Type: " << node.type()
1055 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001056 << " Data Type: " << input->info()->data_type()
1057 << " Input shape: " << input->info()->tensor_shape()
1058 << " Output shape: " << output->info()->tensor_shape()
1059 << " Permutation vector: " << perm
1060 << std::endl);
1061
1062 return std::move(func);
1063}
1064
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001065/** Create a backend pooling layer function
1066 *
1067 * @tparam PoolingLayerFunction Backend pooling function
1068 * @tparam TargetInfo Target-specific information
1069 *
1070 * @param[in] node Node to create the backend function for
1071 *
1072 * @return Backend pooling layer function
1073 */
1074template <typename PoolingLayerFunction, typename TargetInfo>
1075std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1076{
1077 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1078
1079 // Extract IO and info
1080 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1081 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1082 const PoolingLayerInfo pool_info = node.pooling_info();
1083 ARM_COMPUTE_ERROR_ON(input == nullptr);
1084 ARM_COMPUTE_ERROR_ON(output == nullptr);
1085
1086 // Create and configure function
1087 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1088 func->configure(input, output, pool_info);
1089
1090 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001091 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1092 << node.name()
1093 << " Type: " << node.type()
1094 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001095 << " Data Type: " << input->info()->data_type()
1096 << " Input shape: " << input->info()->tensor_shape()
1097 << " Output shape: " << output->info()->tensor_shape()
1098 << " Pooling info: " << pool_info.pool_type()
1099 << std::endl);
1100
1101 return std::move(func);
1102}
1103
Pablo Tello32521432018-11-15 14:43:10 +00001104/** Create a backend priorbox layer function
1105 *
1106 * @tparam PriorBoxLayerFunction Backend priorbox function
1107 * @tparam TargetInfo Target-specific information
1108 *
1109 * @param[in] node Node to create the backend function for
1110 *
1111 * @return Backend priorbox layer function
1112 */
1113template <typename PriorBoxLayerFunction, typename TargetInfo>
1114std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1115{
1116 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1117
1118 // Extract IO and info
1119 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1120 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1121 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1122 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1123 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1124 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1125 ARM_COMPUTE_ERROR_ON(output == nullptr);
1126
1127 // Create and configure function
1128 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1129 func->configure(input0, input1, output, prior_info);
1130
1131 // Log info
1132 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1133 << node.name()
1134 << " Type: " << node.type()
1135 << " Target: " << TargetInfo::TargetType
1136 << " Data Type: " << input0->info()->data_type()
1137 << " Input0 shape: " << input0->info()->tensor_shape()
1138 << " Input1 shape: " << input1->info()->tensor_shape()
1139 << " Output shape: " << output->info()->tensor_shape()
1140 << " PriorBoxLayer info: " << prior_info
1141 << std::endl);
1142
1143 return std::move(func);
1144}
1145
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001146/** Create a backend quantization layer function
1147 *
1148 * @tparam QuantizationLayerFunction Backend quantization function
1149 * @tparam TargetInfo Target-specific information
1150 *
1151 * @param[in] node Node to create the backend function for
1152 *
1153 * @return Backend quantization layer function
1154 */
1155template <typename QuantizationLayerFunction, typename TargetInfo>
1156std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1157{
1158 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1159
1160 // Extract IO and info
1161 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1162 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1163 ARM_COMPUTE_ERROR_ON(input == nullptr);
1164 ARM_COMPUTE_ERROR_ON(output == nullptr);
1165
1166 // Create and configure function
1167 auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1168 func->configure(input, output);
1169
1170 // Log info
1171 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1172 << node.name()
1173 << " Type: " << node.type()
1174 << " Target: " << TargetInfo::TargetType
1175 << " Data Type: " << input->info()->data_type()
1176 << " Input shape: " << input->info()->tensor_shape()
1177 << " Output shape: " << output->info()->tensor_shape()
1178 << std::endl);
1179
1180 return std::move(func);
1181}
1182
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001183/** Create a backend reorg layer function
1184 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001185 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001186 * @tparam TargetInfo Target-specific information
1187 *
1188 * @param[in] node Node to create the backend function for
1189 *
1190 * @return Backend reshape layer function
1191 */
1192template <typename ReorgLayerFunction, typename TargetInfo>
1193std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1194{
1195 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1196
1197 // Extract IO and info
1198 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1199 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1200 ARM_COMPUTE_ERROR_ON(input == nullptr);
1201 ARM_COMPUTE_ERROR_ON(output == nullptr);
1202
1203 // Create and configure function
1204 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1205 func->configure(input, output, node.stride());
1206
1207 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001208 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1209 << node.name()
1210 << " Type: " << node.type()
1211 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001212 << " Data Type: " << input->info()->data_type()
1213 << " Input shape: " << input->info()->tensor_shape()
1214 << " Output shape: " << output->info()->tensor_shape()
1215 << std::endl);
1216
1217 return std::move(func);
1218}
1219
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001220/** Create a backend reshape layer function
1221 *
1222 * @tparam ReshapeLayerFunction Backend reshape function
1223 * @tparam TargetInfo Target-specific information
1224 *
1225 * @param[in] node Node to create the backend function for
1226 *
1227 * @return Backend reshape layer function
1228 */
1229template <typename ReshapeLayerFunction, typename TargetInfo>
1230std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1231{
1232 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1233
1234 // Extract IO and info
1235 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1236 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1237 ARM_COMPUTE_ERROR_ON(input == nullptr);
1238 ARM_COMPUTE_ERROR_ON(output == nullptr);
1239
1240 // Create and configure function
1241 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1242 func->configure(input, output);
1243
1244 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001245 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1246 << node.name()
1247 << " Type: " << node.type()
1248 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001249 << " Data Type: " << input->info()->data_type()
1250 << " Input shape: " << input->info()->tensor_shape()
1251 << " Output shape: " << output->info()->tensor_shape()
1252 << std::endl);
1253
1254 return std::move(func);
1255}
1256
1257/** Create a backend resize layer function
1258 *
1259 * @tparam ResizeLayerFunction Backend resize function
1260 * @tparam TargetInfo Target-specific information
1261 *
1262 * @param[in] node Node to create the backend function for
1263 *
1264 * @return Backend resize layer function
1265 */
1266template <typename ResizeLayerFunction, typename TargetInfo>
1267std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1268{
1269 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1270
1271 // Extract IO and info
1272 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1273 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1274 ARM_COMPUTE_ERROR_ON(input == nullptr);
1275 ARM_COMPUTE_ERROR_ON(output == nullptr);
1276 const InterpolationPolicy policy = node.policy();
1277
1278 // Create and configure function
1279 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1280 func->configure(input, output, policy, BorderMode::CONSTANT);
1281
1282 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001283 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1284 << node.name()
1285 << " Type: " << node.type()
1286 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001287 << " Data Type: " << input->info()->data_type()
1288 << " Input shape: " << input->info()->tensor_shape()
1289 << " Output shape: " << output->info()->tensor_shape()
1290 << " Interpolation: " << policy
1291 << std::endl);
1292
1293 return std::move(func);
1294}
1295
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001296/** Create a backend ROI align layer function
1297 *
1298 * @tparam ROIAlignLayerFunction ROI Align function
1299 * @tparam TargetInfo Target-specific information
1300 *
1301 * @param[in] node Node to create the backend function for
1302 *
1303 * @return ROI Align layer function
1304 */
1305template <typename ROIAlignLayerFunction, typename TargetInfo>
1306std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1307{
1308 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1309
1310 // Extract IO and info
1311 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1312 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1313 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1314 ARM_COMPUTE_ERROR_ON(input == nullptr);
1315 ARM_COMPUTE_ERROR_ON(output == nullptr);
1316 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1317
1318 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1319
1320 // Create and configure function
1321 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1322
1323 func->configure(input, rois, output, pool_info);
1324
1325 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001326 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1327 << node.name()
1328 << " Type: " << node.type()
1329 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001330 << " Data Type: " << input->info()->data_type()
1331 << " Input shape: " << input->info()->tensor_shape()
1332 << " Output shape: " << output->info()->tensor_shape()
1333 << " ROIs shape: " << rois->info()->tensor_shape()
1334 << " ROIPooling width: " << pool_info.pooled_width()
1335 << " ROIPooling height: " << pool_info.pooled_height()
1336 << std::endl);
1337
1338 return std::move(func);
1339}
1340
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001341/** Create a backend slice layer function
1342 *
1343 * @tparam SliceLayerFunction Backend slice function
1344 * @tparam TargetInfo Target-specific information
1345 *
1346 * @param[in] node Node to create the backend function for
1347 *
1348 * @return Backend slice layer function
1349 */
1350template <typename SliceLayerFunction, typename TargetInfo>
1351std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1352{
1353 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1354
1355 // Extract IO and info
1356 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1357 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1358 ARM_COMPUTE_ERROR_ON(input == nullptr);
1359 ARM_COMPUTE_ERROR_ON(output == nullptr);
1360
1361 // Create and configure function
1362 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1363 func->configure(input, output, node.starts(), node.ends());
1364
1365 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001366 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1367 << node.name()
1368 << " Type: " << node.type()
1369 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001370 << " Data Type: " << input->info()->data_type()
1371 << " Input shape: " << input->info()->tensor_shape()
1372 << " Output shape: " << output->info()->tensor_shape()
1373 << std::endl);
1374
1375 return std::move(func);
1376}
1377
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001378/** Create a backend softmax layer function
1379 *
1380 * @tparam SoftmaxLayerFunction Backend softmax function
1381 * @tparam TargetInfo Target-specific information
1382 *
1383 * @param[in] node Node to create the backend function for
1384 * @param[in] ctx Graph context
1385 *
1386 * @return Backend softmax layer function
1387 */
1388template <typename SoftmaxLayerFunction, typename TargetInfo>
1389std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1390{
1391 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1392
1393 // Extract IO and info
1394 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1395 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1396 const float beta = node.beta();
1397 ARM_COMPUTE_ERROR_ON(input == nullptr);
1398 ARM_COMPUTE_ERROR_ON(output == nullptr);
1399
1400 // Create and configure function
1401 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1402 func->configure(input, output, beta);
1403
1404 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001405 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1406 << node.name()
1407 << " Type: " << node.type()
1408 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001409 << " Data Type: " << input->info()->data_type()
1410 << " Input shape: " << input->info()->tensor_shape()
1411 << " Output shape: " << output->info()->tensor_shape()
1412 << std::endl);
1413
1414 return std::move(func);
1415}
Michele Di Giorgioec699752019-03-22 15:25:32 +00001416
1417/** Create a backend layer stack function
1418 *
1419 * @tparam StackLayerFunction Backend stack function
1420 * @tparam TargetInfo Target-specific information
1421 *
1422 * @param[in] node Node to create the backend function for
1423 *
1424 * @return Backend stack layer function
1425 */
1426template <typename StackLayerFunction, typename TargetInfo>
1427std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1428{
1429 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1430 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1431
1432 // Extract IO and info
1433 std::vector<typename TargetInfo::TensorType *> inputs;
1434 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1435 {
1436 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1437 }
1438 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1439 const int axis = node.axis();
1440
1441 // Create and configure function
1442 auto func = support::cpp14::make_unique<StackLayerFunction>();
1443 func->configure(inputs, axis, output);
1444
1445 // Log info
1446 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1447 << node.name()
1448 << " Type: " << node.type()
1449 << " Target: " << TargetInfo::TargetType
1450 << " Data Type: " << output->info()->data_type()
1451 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1452 << " Output shape: " << output->info()->tensor_shape()
1453 << " Num Inputs: " << inputs.size()
1454 << " Axis: " << axis
1455 << std::endl);
1456
1457 return std::move(func);
1458}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001459/** Create a backend Upsample layer function
1460 *
1461 * @tparam UpsampleLayerFunction Backend Upsample function
1462 * @tparam TargetInfo Target-specific information
1463 *
1464 * @param[in] node Node to create the backend function for
1465 * @param[in] ctx Graph context
1466 *
1467 * @return Backend Upsample layer function
1468 */
1469template <typename UpsampleLayerFunction, typename TargetInfo>
1470std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1471{
1472 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1473
1474 // Extract IO and info
1475 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1476 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1477 const Size2D info = node.info();
1478 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1479 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1480 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1481 ARM_COMPUTE_ERROR_ON(input == nullptr);
1482 ARM_COMPUTE_ERROR_ON(output == nullptr);
1483
1484 // Create and configure function
1485 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1486 func->configure(input, output, info, upsampling_policy);
1487
1488 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001489 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1490 << node.name()
1491 << " Type: " << node.type()
1492 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001493 << " Data Type: " << input->info()->data_type()
1494 << " Input shape: " << input->info()->tensor_shape()
1495 << " Output shape: " << output->info()->tensor_shape()
1496 << " Strides: " << info
1497 << " Upsampling policy: " << upsampling_policy
1498 << std::endl);
1499
1500 return std::move(func);
1501}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001502/** Create a backend YOLO layer function
1503 *
1504 * @tparam YoloLayerFunction Backend YOLO function
1505 * @tparam TargetInfo Target-specific information
1506 *
1507 * @param[in] node Node to create the backend function for
1508 * @param[in] ctx Graph context
1509 *
1510 * @return Backend YOLO layer function
1511 */
1512template <typename YOLOlayerFunction, typename TargetInfo>
1513std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1514{
1515 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1516
1517 // Extract IO and info
1518 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1519 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1520 const ActivationLayerInfo act_info = node.activation_info();
1521 const int32_t num_classes = node.num_classes();
1522 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1523 ARM_COMPUTE_ERROR_ON(input == nullptr);
1524 ARM_COMPUTE_ERROR_ON(output == nullptr);
1525
1526 // Create and configure function
1527 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1528 func->configure(input, output, act_info, num_classes);
1529
1530 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001531 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1532 << node.name()
1533 << " Type: " << node.type()
1534 << " Target: " << TargetInfo::TargetType
Michalis Spyrou96f67692018-09-13 11:39:28 +01001535 << " Data Type: " << input->info()->data_type()
1536 << " Input shape: " << input->info()->tensor_shape()
1537 << " Output shape: " << output->info()->tensor_shape()
1538 << " Activation function: " << act_info.activation()
1539 << " Num classes: " << num_classes
1540 << std::endl);
1541
1542 return std::move(func);
1543}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001544} // namespace detail
1545} // namespace backends
1546} // namespace graph
1547} // namespace arm_compute
1548
1549#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */