blob: 1968ec3923a67c678450f3f1d3d634d081ed234d [file] [log] [blame]
/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
31#include "arm_compute/graph/backends/Utils.h"
32#include "arm_compute/graph/nodes/Nodes.h"
33
34#include "arm_compute/core/Error.h"
35#include "arm_compute/core/Helpers.h"
36#include "arm_compute/core/ITensorInfo.h"
37#include "arm_compute/core/utils/misc/Cast.h"
38
39namespace arm_compute
40{
41namespace graph
42{
43namespace backends
44{
45namespace detail
46{
47/** Returns backing tensor of a given tensor
48 *
49 * @tparam TargetInfo Target information
50 *
51 * @param[in] tensor Tensor to extract the backing tensor from
52 *
53 * @return Backing tensor if present else nullptr
54 */
55template <typename TargetInfo>
56typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
57{
58 typename TargetInfo::TensorType *backing_tensor = nullptr;
59 if(tensor != nullptr)
60 {
61 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
62 // Get backing tensor handle
63 ITensorHandle *tensor_handle = tensor->handle();
64 // Get backing tensor
65 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
66 }
67
68 return backing_tensor;
69}
70
71template <typename TargetInfo>
72void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
73{
74 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
75 << " Target : " << TargetInfo::TargetType
76 << " ID : " << node.id()
77 << " Name: " << node.name()
78 << std::endl);
79
80 ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
81 ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
82 ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
83}
84
85/** Creates a backend activation layer function
86 *
87 * @tparam ActivationLayerFunction Backend activation function
88 * @tparam TargetInfo Target-specific information
89 *
90 * @param[in] node Node to create the backend function for
91 *
92 * @return Backend activation layer function
93 */
94template <typename ActivationLayerFunction, typename TargetInfo>
95std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
96{
97 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
98
99 // Extract IO and info
100 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
101 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
102 const ActivationLayerInfo act_info = node.activation_info();
103
104 // Create function
105 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
106 func->configure(input, output, act_info);
107
108 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
109 << " Target " << TargetInfo::TargetType
110 << " Data Type: " << input->info()->data_type()
111 << " Shape: " << input->info()->tensor_shape()
112 << " Activation function: " << act_info.activation()
113 << " a: " << act_info.a()
114 << " b: " << act_info.b()
115 << " InPlace : " << is_in_place_operation(input, output)
116 << std::endl);
117
118 return std::move(func);
119}
120
121/** Create a backend batch normalization layer function
122 *
123 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
124 * @tparam TargetInfo Target-specific information
125 *
126 * @param[in] node Node to create the backend function for
127 *
128 * @return Backend batch normalization layer function
129 */
130template <typename BatchNormalizationLayerFunction, typename TargetInfo>
131std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
132{
133 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
134
135 // Extract IO and info
136 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
137 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
138 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
139 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
140 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
141 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
142 const float epsilon = node.epsilon();
143 const ActivationLayerInfo fused_act = node.fused_activation();
144
145 // Create and configure function
146 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
147 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
148
149 // Log info
150 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
151 << " Target " << TargetInfo::TargetType
152 << " Data Type: " << input->info()->data_type()
153 << " Shape: " << input->info()->tensor_shape()
154 << " Epsilon: " << epsilon << " "
155 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
156 << " InPlace : " << is_in_place_operation(input, output)
157 << std::endl);
158
159 return std::move(func);
160}
161
162/** Create a backend channel shuffle layer function
163 *
164 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
165 * @tparam TargetInfo Target-specific information
166 *
167 * @param[in] node Node to create the backend function for
168 *
169 * @return Backend channel shuffle layer function
170 */
171template <typename ChannelShuffleLayerFunction, typename TargetInfo>
172std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
173{
174 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
175
176 // Extract IO and info
177 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
178 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
179 const unsigned int num_groups = node.num_groups();
180
181 // Create function
182 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
183 func->configure(input, output, num_groups);
184
185 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
186 << " Target " << TargetInfo::TargetType
187 << " Data Type: " << input->info()->data_type()
188 << " Shape: " << input->info()->tensor_shape()
189 << " Num groups: " << num_groups
190 << std::endl);
191
192 return std::move(func);
193}
194
Georgios Pinitase2220552018-07-20 13:23:44 +0100195/** Create a backend layer concatenate function
196 *
197 * @tparam ConcatenateLayerFunction Backend concatenate function
198 * @tparam TargetInfo Target-specific information
199 *
200 * @param[in] node Node to create the backend function for
201 *
202 * @return Backend concatenate layer function
203 */
204template <typename ConcatenateLayerFunction, typename TargetInfo>
205std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
206{
207 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
208 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
209
210 // Return nullptr if depth concatenate is switched off
211 if(!node.is_enabled())
212 {
213 return nullptr;
214 }
215
216 // Extract IO and info
217 std::vector<typename TargetInfo::TensorType *> inputs;
218 for(unsigned int i = 0; i < node.num_inputs(); ++i)
219 {
220 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
221 }
222 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
223 const DataLayoutDimension concat_axis = node.concatenation_axis();
224
225 // Create and configure function
226 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
227 func->configure(inputs, output, concat_axis);
228
229 // Log info
230 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
231 << " Target " << TargetInfo::TargetType
232 << " Data Type: " << output->info()->data_type()
233 << " Shape: " << output->info()->tensor_shape()
234 << " Num Inputs: " << inputs.size()
235 << " Axis: " << concat_axis
236 << std::endl);
237
238 return std::move(func);
239}
240
/** Create a backend convolution layer function
 *
 * Dispatches to the Winograd, Direct, GEMM or target-generic convolution
 * implementation according to the node's configured convolution method.
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context (provides the target's memory manager)
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Quantized asymmetric convolutions accumulate into 32-bit integers,
        // so the bias tensor is forced to S32 here.
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        // Winograd and Direct paths do not support grouped convolution
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        // Fallback: let the target-generic implementation pick a method
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
329
330/** Create a backend deconvolution layer function
331 *
332 * @tparam DeconvolutionLayerFunction Backend deconvolution function
333 * @tparam TargetInfo Target-specific information
334 *
335 * @param[in] node Node to create the backend function for
336 * @param[in] ctx Graph context
337 *
338 * @return Backend deconvolution layer function
339 */
340template <typename DeconvolutionLayerFunction, typename TargetInfo>
341std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
342{
343 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
344
345 // Extract IO and info
346 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
347 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
348 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
349 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
350
351 const PadStrideInfo deconv_info = node.deconvolution_info();
352 const Size2D inner_border = node.inner_border();
353
354 // Create and configure function (we assume that functions have been validated before creation)
355 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
356 std::unique_ptr<IFunction> func;
357
358 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
359 std::string(), mm,
360 input, weights, biases, output, deconv_info, inner_border.x(), inner_border.y());
361
362 // Log info
363 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
364 << " Target " << TargetInfo::TargetType
365 << " Data Type: " << input->info()->data_type()
366 << " Input shape: " << input->info()->tensor_shape()
367 << " Weights shape: " << weights->info()->tensor_shape()
368 << " Output shape: " << output->info()->tensor_shape()
369 << std::endl);
370 return func;
371}
372
/** Create a backend layer depth-wise convolution function
 *
 * Dispatches to the optimized 3x3 kernel when the node selected it, otherwise
 * to the generic depthwise convolution implementation.
 *
 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
 * @tparam TargetInfo                         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Quantized asymmetric convolutions accumulate into 32-bit integers,
        // so the bias tensor is forced to S32 here.
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo              conv_info     = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
    // NOTE(review): depth multiplier is hard-coded to 1 — multipliers > 1 are
    // not wired through from the node here; confirm against the node API.
    const unsigned int        depth_multiplier = 1;
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;
    if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
                                        std::string("DepthwiseConvolutionLayer3x3"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }
    else
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::GenericDepthwiseConvolutionLayer>(
                                        std::string("DepthwiseConvolutionLayer"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
440
441/** Create a backend element-wise operation layer function
442 *
443 * @tparam EltwiseFunctions Backend element-wise function
444 * @tparam TargetInfo Target-specific information
445 *
446 * @param[in] node Node to create the backend function for
447 *
448 * @return Backend element-wise operation layer function
449 */
450template <typename EltwiseFunctions, typename TargetInfo>
451std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
452{
453 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
454
455 // Extract IO and info
456 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
457 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
458 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
459 const EltwiseOperation eltwise_op = node.eltwise_operation();
460 const ConvertPolicy convert_policy = node.convert_policy();
461 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
462 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
463 ARM_COMPUTE_ERROR_ON(output == nullptr);
464
465 std::unique_ptr<IFunction> func = nullptr;
466 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100467 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100468 {
469 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
470 std::string("ArithmeticAddition"),
471 input1, input2, output, convert_policy);
472 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100473 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100474 {
475 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
476 std::string("ArithmeticSubtraction"),
477 input1, input2, output, convert_policy);
478 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100479 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100480 {
481 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
482 std::string("PixelWiseMultiplication"),
483 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
484 }
485 else
486 {
487 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
488 }
489
490 // Log info
491 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
492 << " Target " << TargetInfo::TargetType
493 << " Operation " << func_name
494 << " Data Type: " << input1->info()->data_type()
495 << " Shape : " << input1->info()->tensor_shape()
496 << std::endl);
497
498 return func;
499}
500
501/** Create a backend flatten layer function
502 *
503 * @tparam FlattenLayerFunction Backend flatten function
504 * @tparam TargetInfo Target-specific information
505 *
506 * @param[in] node Node to create the backend function for
507 *
508 * @return Backend flatten layer function
509 */
510template <typename FlattenLayerFunction, typename TargetInfo>
511std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
512{
513 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
514
515 // Extract IO and info
516 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
517 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
518
Georgios Pinitase2220552018-07-20 13:23:44 +0100519 ARM_COMPUTE_ERROR_ON(input == nullptr);
520 ARM_COMPUTE_ERROR_ON(output == nullptr);
521
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100522 // Create and configure function
523 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
524 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100525
526 // Log info
527 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
528 << " Target " << TargetInfo::TargetType
529 << " Data Type: " << input->info()->data_type()
530 << " Input shape: " << input->info()->tensor_shape()
531 << " Output shape: " << output->info()->tensor_shape()
532 << std::endl);
533
534 return std::move(func);
535}
536
537/** Create a backend fully connected layer function
538 *
539 * @tparam FullyConnectedLayerFunction Backend fully-connected function
540 * @tparam TargetInfo Target-specific information
541 *
542 * @param[in] node Node to create the backend function for
543 * @param[in] ctx Graph context
544 *
545 * @return Backend fully connected layer function
546 */
547template <typename FullyConnectedLayerFunction, typename TargetInfo>
548std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
549{
550 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
551
552 // Extract IO and info
553 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
554 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
555 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
556 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas7d66a8e2018-07-17 12:28:42 +0100557 const FullyConnectedLayerInfo fc_info = node.info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100558
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100559 ARM_COMPUTE_ERROR_ON(input == nullptr);
560 ARM_COMPUTE_ERROR_ON(weights == nullptr);
561 ARM_COMPUTE_ERROR_ON(output == nullptr);
562
Georgios Pinitase2220552018-07-20 13:23:44 +0100563 // Create and configure function
564 auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
565 func->configure(input, weights, biases, output, fc_info);
566
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100567 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
568
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100569 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100570 std::ostringstream qss;
571 if(is_quantized)
572 {
573 qss << " Input QuantInfo: " << input->info()->quantization_info()
574 << " Weights QuantInfo: " << weights->info()->quantization_info()
575 << " Output QuantInfo: " << output->info()->quantization_info();
576 }
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100577 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
578 << " Target " << TargetInfo::TargetType
579 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100580 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100581 << " Input shape: " << input->info()->tensor_shape()
582 << " Weights shape: " << weights->info()->tensor_shape()
583 << " Output shape: " << output->info()->tensor_shape()
584 << std::endl);
585
586 return std::move(func);
587}
588
589/** Create a backend normalization layer function
590 *
591 * @tparam NormalizationLayerFunction Backend normalization function
592 * @tparam TargetInfo Target-specific information
593 *
594 * @param[in] node Node to create the backend function for
595 * @param[in] ctx Graph context
596 *
597 * @return Backend normalization layer function
598 */
599template <typename NormalizationLayerFunction, typename TargetInfo>
600std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
601{
602 ARM_COMPUTE_UNUSED(ctx);
603
604 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
605
606 // Extract IO and info
607 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
608 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
609 const NormalizationLayerInfo norm_info = node.normalization_info();
610 ARM_COMPUTE_ERROR_ON(input == nullptr);
611 ARM_COMPUTE_ERROR_ON(output == nullptr);
612
613 // Create and configure function
614 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
615 func->configure(input, output, norm_info);
616
617 // Log info
618 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
619 << " Target " << TargetInfo::TargetType
620 << " Data Type: " << input->info()->data_type()
621 << " Input shape: " << input->info()->tensor_shape()
622 << " Output shape: " << output->info()->tensor_shape()
623 << " Normalization info: " << norm_info.type()
624 << std::endl);
625
626 return std::move(func);
627}
628
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100629/** Create a backend normalize planar YUV layer function
630 *
631 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
632 * @tparam TargetInfo Target-specific information
633 *
634 * @param[in] node Node to create the backend function for
635 *
636 * @return Backend normalize plnar YUV layer function
637 */
638template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
639std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
640{
641 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
642
643 // Extract IO and info
644 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
645 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
646 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
647 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
648 ARM_COMPUTE_ERROR_ON(input == nullptr);
649 ARM_COMPUTE_ERROR_ON(mean == nullptr);
650 ARM_COMPUTE_ERROR_ON(std == nullptr);
651 ARM_COMPUTE_ERROR_ON(output == nullptr);
652
653 // Create and configure function
654 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
655 func->configure(input, output, mean, std);
656
657 // Log info
658 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
659 << " Target " << TargetInfo::TargetType
660 << " Data Type: " << input->info()->data_type()
661 << " Shape: " << input->info()->tensor_shape()
662 << std::endl);
663
664 return std::move(func);
665}
666
Michele Di Giorgio4bb17332018-09-26 13:56:51 +0100667/** Create a backend pad layer function
668 *
669 * @tparam PadLayerFunction Backend pad function
670 * @tparam TargetInfo Target-specific information
671 *
672 * @param[in] node Node to create the backend function for
673 *
674 * @return Backend pad layer function
675 */
676template <typename PadLayerFunction, typename TargetInfo>
677std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
678{
679 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
680
681 // Extract IO and info
682 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
683 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
684 const PaddingList &padding = node.padding();
685 ARM_COMPUTE_ERROR_ON(input == nullptr);
686 ARM_COMPUTE_ERROR_ON(output == nullptr);
687
688 // Create and configure function
689 auto func = support::cpp14::make_unique<PadLayerFunction>();
690 func->configure(input, output, padding);
691
692 // Log info
693 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
694 << " Target " << TargetInfo::TargetType
695 << " Data Type: " << input->info()->data_type()
696 << " Input shape: " << input->info()->tensor_shape()
697 << " Output shape: " << output->info()->tensor_shape()
698 << std::endl);
699
700 return std::move(func);
701}
702
Georgios Pinitas57c48242018-08-02 13:41:49 +0100703/** Create a backend permute layer function
704 *
705 * @tparam PermuteLayerFunction Backend permute function
706 * @tparam TargetInfo Target-specific information
707 *
708 * @param[in] node Node to create the backend function for
709 *
710 * @return Backend permute layer function
711 */
712template <typename PermuteLayerFunction, typename TargetInfo>
713std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
714{
715 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
716
717 // Extract IO and info
718 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
719 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
720 const PermutationVector &perm = node.permutation_vector();
721 ARM_COMPUTE_ERROR_ON(input == nullptr);
722 ARM_COMPUTE_ERROR_ON(output == nullptr);
723
724 // Create and configure function
725 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
726 func->configure(input, output, perm);
727
728 // Log info
729 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
730 << " Target " << TargetInfo::TargetType
731 << " Data Type: " << input->info()->data_type()
732 << " Input shape: " << input->info()->tensor_shape()
733 << " Output shape: " << output->info()->tensor_shape()
734 << " Permutation vector: " << perm
735 << std::endl);
736
737 return std::move(func);
738}
739
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100740/** Create a backend pooling layer function
741 *
742 * @tparam PoolingLayerFunction Backend pooling function
743 * @tparam TargetInfo Target-specific information
744 *
745 * @param[in] node Node to create the backend function for
746 *
747 * @return Backend pooling layer function
748 */
749template <typename PoolingLayerFunction, typename TargetInfo>
750std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
751{
752 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
753
754 // Extract IO and info
755 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
756 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
757 const PoolingLayerInfo pool_info = node.pooling_info();
758 ARM_COMPUTE_ERROR_ON(input == nullptr);
759 ARM_COMPUTE_ERROR_ON(output == nullptr);
760
761 // Create and configure function
762 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
763 func->configure(input, output, pool_info);
764
765 // Log info
766 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
767 << " Target " << TargetInfo::TargetType
768 << " Data Type: " << input->info()->data_type()
769 << " Input shape: " << input->info()->tensor_shape()
770 << " Output shape: " << output->info()->tensor_shape()
771 << " Pooling info: " << pool_info.pool_type()
772 << std::endl);
773
774 return std::move(func);
775}
776
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100777/** Create a backend reorg layer function
778 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100779 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100780 * @tparam TargetInfo Target-specific information
781 *
782 * @param[in] node Node to create the backend function for
783 *
 * @return Backend reorg layer function
785 */
786template <typename ReorgLayerFunction, typename TargetInfo>
787std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
788{
789 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
790
791 // Extract IO and info
792 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
793 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
794 ARM_COMPUTE_ERROR_ON(input == nullptr);
795 ARM_COMPUTE_ERROR_ON(output == nullptr);
796
797 // Create and configure function
798 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
799 func->configure(input, output, node.stride());
800
801 // Log info
802 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
803 << " Target " << TargetInfo::TargetType
804 << " Data Type: " << input->info()->data_type()
805 << " Input shape: " << input->info()->tensor_shape()
806 << " Output shape: " << output->info()->tensor_shape()
807 << std::endl);
808
809 return std::move(func);
810}
811
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100812/** Create a backend reshape layer function
813 *
814 * @tparam ReshapeLayerFunction Backend reshape function
815 * @tparam TargetInfo Target-specific information
816 *
817 * @param[in] node Node to create the backend function for
818 *
819 * @return Backend reshape layer function
820 */
821template <typename ReshapeLayerFunction, typename TargetInfo>
822std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
823{
824 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
825
826 // Extract IO and info
827 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
828 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
829 ARM_COMPUTE_ERROR_ON(input == nullptr);
830 ARM_COMPUTE_ERROR_ON(output == nullptr);
831
832 // Create and configure function
833 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
834 func->configure(input, output);
835
836 // Log info
837 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
838 << " Target " << TargetInfo::TargetType
839 << " Data Type: " << input->info()->data_type()
840 << " Input shape: " << input->info()->tensor_shape()
841 << " Output shape: " << output->info()->tensor_shape()
842 << std::endl);
843
844 return std::move(func);
845}
846
847/** Create a backend resize layer function
848 *
849 * @tparam ResizeLayerFunction Backend resize function
850 * @tparam TargetInfo Target-specific information
851 *
852 * @param[in] node Node to create the backend function for
853 *
854 * @return Backend resize layer function
855 */
856template <typename ResizeLayerFunction, typename TargetInfo>
857std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
858{
859 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
860
861 // Extract IO and info
862 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
863 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
864 ARM_COMPUTE_ERROR_ON(input == nullptr);
865 ARM_COMPUTE_ERROR_ON(output == nullptr);
866 const InterpolationPolicy policy = node.policy();
867
868 // Create and configure function
869 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
870 func->configure(input, output, policy, BorderMode::CONSTANT);
871
872 // Log info
873 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
874 << " Target " << TargetInfo::TargetType
875 << " Data Type: " << input->info()->data_type()
876 << " Input shape: " << input->info()->tensor_shape()
877 << " Output shape: " << output->info()->tensor_shape()
878 << " Interpolation: " << policy
879 << std::endl);
880
881 return std::move(func);
882}
883
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100884/** Create a backend slice layer function
885 *
886 * @tparam SliceLayerFunction Backend slice function
887 * @tparam TargetInfo Target-specific information
888 *
889 * @param[in] node Node to create the backend function for
890 *
891 * @return Backend slice layer function
892 */
893template <typename SliceLayerFunction, typename TargetInfo>
894std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
895{
896 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
897
898 // Extract IO and info
899 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
900 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
901 ARM_COMPUTE_ERROR_ON(input == nullptr);
902 ARM_COMPUTE_ERROR_ON(output == nullptr);
903
904 // Create and configure function
905 auto func = support::cpp14::make_unique<SliceLayerFunction>();
906 func->configure(input, output, node.starts(), node.ends());
907
908 // Log info
909 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
910 << " Target " << TargetInfo::TargetType
911 << " Data Type: " << input->info()->data_type()
912 << " Input shape: " << input->info()->tensor_shape()
913 << " Output shape: " << output->info()->tensor_shape()
914 << std::endl);
915
916 return std::move(func);
917}
918
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100919/** Create a backend softmax layer function
920 *
921 * @tparam SoftmaxLayerFunction Backend softmax function
922 * @tparam TargetInfo Target-specific information
923 *
924 * @param[in] node Node to create the backend function for
925 * @param[in] ctx Graph context
926 *
927 * @return Backend softmax layer function
928 */
929template <typename SoftmaxLayerFunction, typename TargetInfo>
930std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
931{
932 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
933
934 // Extract IO and info
935 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
936 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
937 const float beta = node.beta();
938 ARM_COMPUTE_ERROR_ON(input == nullptr);
939 ARM_COMPUTE_ERROR_ON(output == nullptr);
940
941 // Create and configure function
942 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
943 func->configure(input, output, beta);
944
945 // Log info
946 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
947 << " Target " << TargetInfo::TargetType
948 << " Data Type: " << input->info()->data_type()
949 << " Input shape: " << input->info()->tensor_shape()
950 << " Output shape: " << output->info()->tensor_shape()
951 << std::endl);
952
953 return std::move(func);
954}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +0100955/** Create a backend Upsample layer function
956 *
957 * @tparam UpsampleLayerFunction Backend Upsample function
958 * @tparam TargetInfo Target-specific information
959 *
960 * @param[in] node Node to create the backend function for
961 * @param[in] ctx Graph context
962 *
963 * @return Backend Upsample layer function
964 */
965template <typename UpsampleLayerFunction, typename TargetInfo>
966std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
967{
968 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
969
970 // Extract IO and info
971 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
972 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
973 const Size2D info = node.info();
974 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
975 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
976 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
977 ARM_COMPUTE_ERROR_ON(input == nullptr);
978 ARM_COMPUTE_ERROR_ON(output == nullptr);
979
980 // Create and configure function
981 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
982 func->configure(input, output, info, upsampling_policy);
983
984 // Log info
985 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
986 << " Target " << TargetInfo::TargetType
987 << " Data Type: " << input->info()->data_type()
988 << " Input shape: " << input->info()->tensor_shape()
989 << " Output shape: " << output->info()->tensor_shape()
990 << " Strides: " << info
991 << " Upsampling policy: " << upsampling_policy
992 << std::endl);
993
994 return std::move(func);
995}
Michalis Spyrou96f67692018-09-13 11:39:28 +0100996/** Create a backend YOLO layer function
997 *
998 * @tparam YoloLayerFunction Backend YOLO function
999 * @tparam TargetInfo Target-specific information
1000 *
1001 * @param[in] node Node to create the backend function for
1002 * @param[in] ctx Graph context
1003 *
1004 * @return Backend YOLO layer function
1005 */
1006template <typename YOLOlayerFunction, typename TargetInfo>
1007std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1008{
1009 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1010
1011 // Extract IO and info
1012 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1013 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1014 const ActivationLayerInfo act_info = node.activation_info();
1015 const int32_t num_classes = node.num_classes();
1016 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1017 ARM_COMPUTE_ERROR_ON(input == nullptr);
1018 ARM_COMPUTE_ERROR_ON(output == nullptr);
1019
1020 // Create and configure function
1021 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1022 func->configure(input, output, act_info, num_classes);
1023
1024 // Log info
1025 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1026 << " Target " << TargetInfo::TargetType
1027 << " Data Type: " << input->info()->data_type()
1028 << " Input shape: " << input->info()->tensor_shape()
1029 << " Output shape: " << output->info()->tensor_shape()
1030 << " Activation function: " << act_info.activation()
1031 << " Num classes: " << num_classes
1032 << std::endl);
1033
1034 return std::move(func);
1035}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001036} // namespace detail
1037} // namespace backends
1038} // namespace graph
1039} // namespace arm_compute
1040
1041#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */