blob: d235fe9f6f5e71e5d15c542508e3c006416af0d5 [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
2 * Copyright (c) 2018 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
31#include "arm_compute/graph/backends/Utils.h"
32#include "arm_compute/graph/nodes/Nodes.h"
33
34#include "arm_compute/core/Error.h"
35#include "arm_compute/core/Helpers.h"
36#include "arm_compute/core/ITensorInfo.h"
37#include "arm_compute/core/utils/misc/Cast.h"
38
39namespace arm_compute
40{
41namespace graph
42{
43namespace backends
44{
45namespace detail
46{
47/** Returns backing tensor of a given tensor
48 *
49 * @tparam TargetInfo Target information
50 *
51 * @param[in] tensor Tensor to extract the backing tensor from
52 *
53 * @return Backing tensor if present else nullptr
54 */
55template <typename TargetInfo>
56typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
57{
58 typename TargetInfo::TensorType *backing_tensor = nullptr;
59 if(tensor != nullptr)
60 {
61 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
62 // Get backing tensor handle
63 ITensorHandle *tensor_handle = tensor->handle();
64 // Get backing tensor
65 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
66 }
67
68 return backing_tensor;
69}
70
/** Validates a node before a backend function is created for it
 *
 * Logs the node that is about to be instantiated and checks (debug-time
 * assertions) that the node was assigned to the expected target and has the
 * expected number of input and output edges.
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node                 Node to validate
 * @param[in] num_expected_inputs  Expected number of input edges
 * @param[in] num_expected_outputs Expected number of output edges
 */
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target : " << TargetInfo::TargetType
                                  << " ID : " << node.id()
                                  << " Name: " << node.name()
                                  << std::endl);

    // NOTE(review): ARM_COMPUTE_ERROR_ON is assumed to compile out in release
    // builds, making this validation debug-only — verify against Error.h
    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
}
84
85/** Creates a backend activation layer function
86 *
87 * @tparam ActivationLayerFunction Backend activation function
88 * @tparam TargetInfo Target-specific information
89 *
90 * @param[in] node Node to create the backend function for
91 *
92 * @return Backend activation layer function
93 */
94template <typename ActivationLayerFunction, typename TargetInfo>
95std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
96{
97 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
98
99 // Extract IO and info
100 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
101 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
102 const ActivationLayerInfo act_info = node.activation_info();
103
104 // Create function
105 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
106 func->configure(input, output, act_info);
107
108 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
109 << " Target " << TargetInfo::TargetType
110 << " Data Type: " << input->info()->data_type()
111 << " Shape: " << input->info()->tensor_shape()
112 << " Activation function: " << act_info.activation()
113 << " a: " << act_info.a()
114 << " b: " << act_info.b()
115 << " InPlace : " << is_in_place_operation(input, output)
116 << std::endl);
117
118 return std::move(func);
119}
120
121/** Create a backend batch normalization layer function
122 *
123 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
124 * @tparam TargetInfo Target-specific information
125 *
126 * @param[in] node Node to create the backend function for
127 *
128 * @return Backend batch normalization layer function
129 */
130template <typename BatchNormalizationLayerFunction, typename TargetInfo>
131std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
132{
133 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
134
135 // Extract IO and info
136 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
137 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
138 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
139 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
140 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
141 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
142 const float epsilon = node.epsilon();
143 const ActivationLayerInfo fused_act = node.fused_activation();
144
145 // Create and configure function
146 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
147 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
148
149 // Log info
150 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
151 << " Target " << TargetInfo::TargetType
152 << " Data Type: " << input->info()->data_type()
153 << " Shape: " << input->info()->tensor_shape()
154 << " Epsilon: " << epsilon << " "
155 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
156 << " InPlace : " << is_in_place_operation(input, output)
157 << std::endl);
158
159 return std::move(func);
160}
161
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100162/** Create a backend bounding box transform layer function
163 *
164 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
165 * @tparam TargetInfo Target-specific information
166 *
167 * @param[in] node Node to create the backend function for
168 *
169 * @return Backend bounding box transform layer function
170 */
171template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
172std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
173{
174 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
175
176 // Extract IO and info
177 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
178 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
179 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
180 const BoundingBoxTransformInfo bbox_info = node.info();
181
182 // Create and configure function
183 auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
184 func->configure(input, output, deltas, bbox_info);
185
186 // Log info
187 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
188 << " Target " << TargetInfo::TargetType
189 << " Data Type: " << input->info()->data_type()
190 << " Shape: " << input->info()->tensor_shape()
191 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
192 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
193 << std::endl);
194
195 return std::move(func);
196}
197
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100198/** Create a backend channel shuffle layer function
199 *
200 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
201 * @tparam TargetInfo Target-specific information
202 *
203 * @param[in] node Node to create the backend function for
204 *
205 * @return Backend channel shuffle layer function
206 */
207template <typename ChannelShuffleLayerFunction, typename TargetInfo>
208std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
209{
210 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
211
212 // Extract IO and info
213 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
214 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
215 const unsigned int num_groups = node.num_groups();
216
217 // Create function
218 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
219 func->configure(input, output, num_groups);
220
221 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
222 << " Target " << TargetInfo::TargetType
223 << " Data Type: " << input->info()->data_type()
224 << " Shape: " << input->info()->tensor_shape()
225 << " Num groups: " << num_groups
226 << std::endl);
227
228 return std::move(func);
229}
230
Georgios Pinitase2220552018-07-20 13:23:44 +0100231/** Create a backend layer concatenate function
232 *
233 * @tparam ConcatenateLayerFunction Backend concatenate function
234 * @tparam TargetInfo Target-specific information
235 *
236 * @param[in] node Node to create the backend function for
237 *
238 * @return Backend concatenate layer function
239 */
240template <typename ConcatenateLayerFunction, typename TargetInfo>
241std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
242{
243 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
244 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
245
246 // Return nullptr if depth concatenate is switched off
247 if(!node.is_enabled())
248 {
249 return nullptr;
250 }
251
252 // Extract IO and info
253 std::vector<typename TargetInfo::TensorType *> inputs;
254 for(unsigned int i = 0; i < node.num_inputs(); ++i)
255 {
256 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
257 }
258 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
259 const DataLayoutDimension concat_axis = node.concatenation_axis();
260
261 // Create and configure function
262 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
263 func->configure(inputs, output, concat_axis);
264
265 // Log info
266 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
267 << " Target " << TargetInfo::TargetType
268 << " Data Type: " << output->info()->data_type()
269 << " Shape: " << output->info()->tensor_shape()
270 << " Num Inputs: " << inputs.size()
271 << " Axis: " << concat_axis
272 << std::endl);
273
274 return std::move(func);
275}
276
/** Create a backend convolution layer function
 *
 * Dispatches to the Winograd, Direct, GEMM or generic backend implementation
 * depending on the convolution method selected on the node.
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context (provides the target's memory manager)
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO: inputs are [data, weights, biases]
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Quantized asymmetric convolutions accumulate into 32-bit biases;
        // mutate the bias tensor info accordingly before configuration
        biases->info()->set_data_type(DataType::S32);
    }

    // Extract convolution configuration from the node
    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        // Winograd and Direct paths do not support grouped convolution
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        // Direct convolution is the only path that is not memory managed
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        // Fallback: let the generic layer pick the method internally (honours fast_math)
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info (quantization details only when relevant)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
365
366/** Create a backend deconvolution layer function
367 *
368 * @tparam DeconvolutionLayerFunction Backend deconvolution function
369 * @tparam TargetInfo Target-specific information
370 *
371 * @param[in] node Node to create the backend function for
372 * @param[in] ctx Graph context
373 *
374 * @return Backend deconvolution layer function
375 */
376template <typename DeconvolutionLayerFunction, typename TargetInfo>
377std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
378{
379 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
380
381 // Extract IO and info
382 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
383 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
384 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
385 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
386
387 const PadStrideInfo deconv_info = node.deconvolution_info();
388 const Size2D inner_border = node.inner_border();
389
390 // Create and configure function (we assume that functions have been validated before creation)
391 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
392 std::unique_ptr<IFunction> func;
393
394 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
395 std::string(), mm,
396 input, weights, biases, output, deconv_info, inner_border.x(), inner_border.y());
397
398 // Log info
399 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
400 << " Target " << TargetInfo::TargetType
401 << " Data Type: " << input->info()->data_type()
402 << " Input shape: " << input->info()->tensor_shape()
403 << " Weights shape: " << weights->info()->tensor_shape()
404 << " Output shape: " << output->info()->tensor_shape()
405 << std::endl);
406 return func;
407}
408
/** Create a backend layer depth-wise convolution function
 *
 * Dispatches to the optimized 3x3 implementation when the node selected it,
 * otherwise to the generic depthwise convolution implementation.
 *
 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
 * @tparam TargetInfo                         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO: inputs are [data, weights, biases]
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Quantized asymmetric convolutions accumulate into 32-bit biases;
        // mutate the bias tensor info accordingly before configuration
        biases->info()->set_data_type(DataType::S32);
    }

    // Extract configuration from the node
    const PadStrideInfo              conv_info        = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm    = node.depthwise_convolution_method();
    // Depth multiplier is fixed to 1 here (the node does not provide one)
    const unsigned int               depth_multiplier = 1;
    const ActivationLayerInfo        fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;
    if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
                                        std::string("DepthwiseConvolutionLayer3x3"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }
    else
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::GenericDepthwiseConvolutionLayer>(
                                        std::string("DepthwiseConvolutionLayer"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }

    // Log info (quantization details only when relevant)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
476
477/** Create a backend element-wise operation layer function
478 *
479 * @tparam EltwiseFunctions Backend element-wise function
480 * @tparam TargetInfo Target-specific information
481 *
482 * @param[in] node Node to create the backend function for
483 *
484 * @return Backend element-wise operation layer function
485 */
486template <typename EltwiseFunctions, typename TargetInfo>
487std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
488{
489 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
490
491 // Extract IO and info
492 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
493 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
494 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
495 const EltwiseOperation eltwise_op = node.eltwise_operation();
496 const ConvertPolicy convert_policy = node.convert_policy();
497 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
498 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
499 ARM_COMPUTE_ERROR_ON(output == nullptr);
500
501 std::unique_ptr<IFunction> func = nullptr;
502 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100503 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100504 {
505 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
506 std::string("ArithmeticAddition"),
507 input1, input2, output, convert_policy);
508 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100509 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100510 {
511 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
512 std::string("ArithmeticSubtraction"),
513 input1, input2, output, convert_policy);
514 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100515 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100516 {
517 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
518 std::string("PixelWiseMultiplication"),
519 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
520 }
521 else
522 {
523 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
524 }
525
526 // Log info
527 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
528 << " Target " << TargetInfo::TargetType
529 << " Operation " << func_name
530 << " Data Type: " << input1->info()->data_type()
531 << " Shape : " << input1->info()->tensor_shape()
532 << std::endl);
533
534 return func;
535}
536
537/** Create a backend flatten layer function
538 *
539 * @tparam FlattenLayerFunction Backend flatten function
540 * @tparam TargetInfo Target-specific information
541 *
542 * @param[in] node Node to create the backend function for
543 *
544 * @return Backend flatten layer function
545 */
546template <typename FlattenLayerFunction, typename TargetInfo>
547std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
548{
549 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
550
551 // Extract IO and info
552 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
553 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
554
Georgios Pinitase2220552018-07-20 13:23:44 +0100555 ARM_COMPUTE_ERROR_ON(input == nullptr);
556 ARM_COMPUTE_ERROR_ON(output == nullptr);
557
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100558 // Create and configure function
559 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
560 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100561
562 // Log info
563 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
564 << " Target " << TargetInfo::TargetType
565 << " Data Type: " << input->info()->data_type()
566 << " Input shape: " << input->info()->tensor_shape()
567 << " Output shape: " << output->info()->tensor_shape()
568 << std::endl);
569
570 return std::move(func);
571}
572
/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context (provides the target's memory manager)
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO: inputs are [data, weights, biases]; biases may be absent (nullptr)
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function; the backend function takes the target's
    // memory manager at construction time
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info (quantization details only when relevant)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    // std::move required for the converting return pre-C++14
    return std::move(func);
}
624
625/** Create a backend normalization layer function
626 *
627 * @tparam NormalizationLayerFunction Backend normalization function
628 * @tparam TargetInfo Target-specific information
629 *
630 * @param[in] node Node to create the backend function for
631 * @param[in] ctx Graph context
632 *
633 * @return Backend normalization layer function
634 */
635template <typename NormalizationLayerFunction, typename TargetInfo>
636std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
637{
638 ARM_COMPUTE_UNUSED(ctx);
639
640 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
641
642 // Extract IO and info
643 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
644 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
645 const NormalizationLayerInfo norm_info = node.normalization_info();
646 ARM_COMPUTE_ERROR_ON(input == nullptr);
647 ARM_COMPUTE_ERROR_ON(output == nullptr);
648
649 // Create and configure function
650 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
651 func->configure(input, output, norm_info);
652
653 // Log info
654 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
655 << " Target " << TargetInfo::TargetType
656 << " Data Type: " << input->info()->data_type()
657 << " Input shape: " << input->info()->tensor_shape()
658 << " Output shape: " << output->info()->tensor_shape()
659 << " Normalization info: " << norm_info.type()
660 << std::endl);
661
662 return std::move(func);
663}
664
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100665/** Create a backend normalize planar YUV layer function
666 *
667 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
668 * @tparam TargetInfo Target-specific information
669 *
670 * @param[in] node Node to create the backend function for
671 *
672 * @return Backend normalize plnar YUV layer function
673 */
674template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
675std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
676{
677 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
678
679 // Extract IO and info
680 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
681 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
682 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
683 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
684 ARM_COMPUTE_ERROR_ON(input == nullptr);
685 ARM_COMPUTE_ERROR_ON(mean == nullptr);
686 ARM_COMPUTE_ERROR_ON(std == nullptr);
687 ARM_COMPUTE_ERROR_ON(output == nullptr);
688
689 // Create and configure function
690 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
691 func->configure(input, output, mean, std);
692
693 // Log info
694 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
695 << " Target " << TargetInfo::TargetType
696 << " Data Type: " << input->info()->data_type()
697 << " Shape: " << input->info()->tensor_shape()
698 << std::endl);
699
700 return std::move(func);
701}
702
Michele Di Giorgio4bb17332018-09-26 13:56:51 +0100703/** Create a backend pad layer function
704 *
705 * @tparam PadLayerFunction Backend pad function
706 * @tparam TargetInfo Target-specific information
707 *
708 * @param[in] node Node to create the backend function for
709 *
710 * @return Backend pad layer function
711 */
712template <typename PadLayerFunction, typename TargetInfo>
713std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
714{
715 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
716
717 // Extract IO and info
718 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
719 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
720 const PaddingList &padding = node.padding();
721 ARM_COMPUTE_ERROR_ON(input == nullptr);
722 ARM_COMPUTE_ERROR_ON(output == nullptr);
723
724 // Create and configure function
725 auto func = support::cpp14::make_unique<PadLayerFunction>();
726 func->configure(input, output, padding);
727
728 // Log info
729 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
730 << " Target " << TargetInfo::TargetType
731 << " Data Type: " << input->info()->data_type()
732 << " Input shape: " << input->info()->tensor_shape()
733 << " Output shape: " << output->info()->tensor_shape()
734 << std::endl);
735
736 return std::move(func);
737}
738
Georgios Pinitas57c48242018-08-02 13:41:49 +0100739/** Create a backend permute layer function
740 *
741 * @tparam PermuteLayerFunction Backend permute function
742 * @tparam TargetInfo Target-specific information
743 *
744 * @param[in] node Node to create the backend function for
745 *
746 * @return Backend permute layer function
747 */
748template <typename PermuteLayerFunction, typename TargetInfo>
749std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
750{
751 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
752
753 // Extract IO and info
754 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
755 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
756 const PermutationVector &perm = node.permutation_vector();
757 ARM_COMPUTE_ERROR_ON(input == nullptr);
758 ARM_COMPUTE_ERROR_ON(output == nullptr);
759
760 // Create and configure function
761 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
762 func->configure(input, output, perm);
763
764 // Log info
765 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
766 << " Target " << TargetInfo::TargetType
767 << " Data Type: " << input->info()->data_type()
768 << " Input shape: " << input->info()->tensor_shape()
769 << " Output shape: " << output->info()->tensor_shape()
770 << " Permutation vector: " << perm
771 << std::endl);
772
773 return std::move(func);
774}
775
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100776/** Create a backend pooling layer function
777 *
778 * @tparam PoolingLayerFunction Backend pooling function
779 * @tparam TargetInfo Target-specific information
780 *
781 * @param[in] node Node to create the backend function for
782 *
783 * @return Backend pooling layer function
784 */
785template <typename PoolingLayerFunction, typename TargetInfo>
786std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
787{
788 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
789
790 // Extract IO and info
791 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
792 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
793 const PoolingLayerInfo pool_info = node.pooling_info();
794 ARM_COMPUTE_ERROR_ON(input == nullptr);
795 ARM_COMPUTE_ERROR_ON(output == nullptr);
796
797 // Create and configure function
798 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
799 func->configure(input, output, pool_info);
800
801 // Log info
802 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
803 << " Target " << TargetInfo::TargetType
804 << " Data Type: " << input->info()->data_type()
805 << " Input shape: " << input->info()->tensor_shape()
806 << " Output shape: " << output->info()->tensor_shape()
807 << " Pooling info: " << pool_info.pool_type()
808 << std::endl);
809
810 return std::move(func);
811}
812
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100813/** Create a backend reorg layer function
814 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100815 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100816 * @tparam TargetInfo Target-specific information
817 *
818 * @param[in] node Node to create the backend function for
819 *
820 * @return Backend reshape layer function
821 */
822template <typename ReorgLayerFunction, typename TargetInfo>
823std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
824{
825 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
826
827 // Extract IO and info
828 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
829 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
830 ARM_COMPUTE_ERROR_ON(input == nullptr);
831 ARM_COMPUTE_ERROR_ON(output == nullptr);
832
833 // Create and configure function
834 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
835 func->configure(input, output, node.stride());
836
837 // Log info
838 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
839 << " Target " << TargetInfo::TargetType
840 << " Data Type: " << input->info()->data_type()
841 << " Input shape: " << input->info()->tensor_shape()
842 << " Output shape: " << output->info()->tensor_shape()
843 << std::endl);
844
845 return std::move(func);
846}
847
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100848/** Create a backend reshape layer function
849 *
850 * @tparam ReshapeLayerFunction Backend reshape function
851 * @tparam TargetInfo Target-specific information
852 *
853 * @param[in] node Node to create the backend function for
854 *
855 * @return Backend reshape layer function
856 */
857template <typename ReshapeLayerFunction, typename TargetInfo>
858std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
859{
860 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
861
862 // Extract IO and info
863 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
864 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
865 ARM_COMPUTE_ERROR_ON(input == nullptr);
866 ARM_COMPUTE_ERROR_ON(output == nullptr);
867
868 // Create and configure function
869 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
870 func->configure(input, output);
871
872 // Log info
873 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
874 << " Target " << TargetInfo::TargetType
875 << " Data Type: " << input->info()->data_type()
876 << " Input shape: " << input->info()->tensor_shape()
877 << " Output shape: " << output->info()->tensor_shape()
878 << std::endl);
879
880 return std::move(func);
881}
882
883/** Create a backend resize layer function
884 *
885 * @tparam ResizeLayerFunction Backend resize function
886 * @tparam TargetInfo Target-specific information
887 *
888 * @param[in] node Node to create the backend function for
889 *
890 * @return Backend resize layer function
891 */
892template <typename ResizeLayerFunction, typename TargetInfo>
893std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
894{
895 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
896
897 // Extract IO and info
898 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
899 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
900 ARM_COMPUTE_ERROR_ON(input == nullptr);
901 ARM_COMPUTE_ERROR_ON(output == nullptr);
902 const InterpolationPolicy policy = node.policy();
903
904 // Create and configure function
905 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
906 func->configure(input, output, policy, BorderMode::CONSTANT);
907
908 // Log info
909 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
910 << " Target " << TargetInfo::TargetType
911 << " Data Type: " << input->info()->data_type()
912 << " Input shape: " << input->info()->tensor_shape()
913 << " Output shape: " << output->info()->tensor_shape()
914 << " Interpolation: " << policy
915 << std::endl);
916
917 return std::move(func);
918}
919
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100920/** Create a backend slice layer function
921 *
922 * @tparam SliceLayerFunction Backend slice function
923 * @tparam TargetInfo Target-specific information
924 *
925 * @param[in] node Node to create the backend function for
926 *
927 * @return Backend slice layer function
928 */
929template <typename SliceLayerFunction, typename TargetInfo>
930std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
931{
932 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
933
934 // Extract IO and info
935 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
936 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
937 ARM_COMPUTE_ERROR_ON(input == nullptr);
938 ARM_COMPUTE_ERROR_ON(output == nullptr);
939
940 // Create and configure function
941 auto func = support::cpp14::make_unique<SliceLayerFunction>();
942 func->configure(input, output, node.starts(), node.ends());
943
944 // Log info
945 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
946 << " Target " << TargetInfo::TargetType
947 << " Data Type: " << input->info()->data_type()
948 << " Input shape: " << input->info()->tensor_shape()
949 << " Output shape: " << output->info()->tensor_shape()
950 << std::endl);
951
952 return std::move(func);
953}
954
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100955/** Create a backend softmax layer function
956 *
957 * @tparam SoftmaxLayerFunction Backend softmax function
958 * @tparam TargetInfo Target-specific information
959 *
960 * @param[in] node Node to create the backend function for
961 * @param[in] ctx Graph context
962 *
963 * @return Backend softmax layer function
964 */
965template <typename SoftmaxLayerFunction, typename TargetInfo>
966std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
967{
968 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
969
970 // Extract IO and info
971 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
972 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
973 const float beta = node.beta();
974 ARM_COMPUTE_ERROR_ON(input == nullptr);
975 ARM_COMPUTE_ERROR_ON(output == nullptr);
976
977 // Create and configure function
978 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
979 func->configure(input, output, beta);
980
981 // Log info
982 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
983 << " Target " << TargetInfo::TargetType
984 << " Data Type: " << input->info()->data_type()
985 << " Input shape: " << input->info()->tensor_shape()
986 << " Output shape: " << output->info()->tensor_shape()
987 << std::endl);
988
989 return std::move(func);
990}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +0100991/** Create a backend Upsample layer function
992 *
993 * @tparam UpsampleLayerFunction Backend Upsample function
994 * @tparam TargetInfo Target-specific information
995 *
996 * @param[in] node Node to create the backend function for
997 * @param[in] ctx Graph context
998 *
999 * @return Backend Upsample layer function
1000 */
1001template <typename UpsampleLayerFunction, typename TargetInfo>
1002std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1003{
1004 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1005
1006 // Extract IO and info
1007 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1008 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1009 const Size2D info = node.info();
1010 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1011 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1012 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1013 ARM_COMPUTE_ERROR_ON(input == nullptr);
1014 ARM_COMPUTE_ERROR_ON(output == nullptr);
1015
1016 // Create and configure function
1017 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1018 func->configure(input, output, info, upsampling_policy);
1019
1020 // Log info
1021 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1022 << " Target " << TargetInfo::TargetType
1023 << " Data Type: " << input->info()->data_type()
1024 << " Input shape: " << input->info()->tensor_shape()
1025 << " Output shape: " << output->info()->tensor_shape()
1026 << " Strides: " << info
1027 << " Upsampling policy: " << upsampling_policy
1028 << std::endl);
1029
1030 return std::move(func);
1031}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001032/** Create a backend YOLO layer function
1033 *
1034 * @tparam YoloLayerFunction Backend YOLO function
1035 * @tparam TargetInfo Target-specific information
1036 *
1037 * @param[in] node Node to create the backend function for
1038 * @param[in] ctx Graph context
1039 *
1040 * @return Backend YOLO layer function
1041 */
1042template <typename YOLOlayerFunction, typename TargetInfo>
1043std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1044{
1045 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1046
1047 // Extract IO and info
1048 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1049 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1050 const ActivationLayerInfo act_info = node.activation_info();
1051 const int32_t num_classes = node.num_classes();
1052 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1053 ARM_COMPUTE_ERROR_ON(input == nullptr);
1054 ARM_COMPUTE_ERROR_ON(output == nullptr);
1055
1056 // Create and configure function
1057 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1058 func->configure(input, output, act_info, num_classes);
1059
1060 // Log info
1061 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1062 << " Target " << TargetInfo::TargetType
1063 << " Data Type: " << input->info()->data_type()
1064 << " Input shape: " << input->info()->tensor_shape()
1065 << " Output shape: " << output->info()->tensor_shape()
1066 << " Activation function: " << act_info.activation()
1067 << " Num classes: " << num_classes
1068 << std::endl);
1069
1070 return std::move(func);
1071}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001072} // namespace detail
1073} // namespace backends
1074} // namespace graph
1075} // namespace arm_compute
1076
1077#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */