blob: 082d43afdb812143bfff3c60a9f4313b127fe20c [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
2 * Copyright (c) 2018 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
31#include "arm_compute/graph/backends/Utils.h"
32#include "arm_compute/graph/nodes/Nodes.h"
33
34#include "arm_compute/core/Error.h"
35#include "arm_compute/core/Helpers.h"
36#include "arm_compute/core/ITensorInfo.h"
37#include "arm_compute/core/utils/misc/Cast.h"
38
39namespace arm_compute
40{
41namespace graph
42{
43namespace backends
44{
45namespace detail
46{
47/** Returns backing tensor of a given tensor
48 *
49 * @tparam TargetInfo Target information
50 *
51 * @param[in] tensor Tensor to extract the backing tensor from
52 *
53 * @return Backing tensor if present else nullptr
54 */
55template <typename TargetInfo>
56typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
57{
58 typename TargetInfo::TensorType *backing_tensor = nullptr;
59 if(tensor != nullptr)
60 {
61 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
62 // Get backing tensor handle
63 ITensorHandle *tensor_handle = tensor->handle();
64 // Get backing tensor
65 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
66 }
67
68 return backing_tensor;
69}
70
/** Validates a node against the expectations of its backend function creator
 *
 * Logs a verbose creation trace and asserts (debug builds) that the node is
 * assigned to this backend's target and has the expected IO arity.
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node                 Node to validate
 * @param[in] num_expected_inputs  Expected number of input edges
 * @param[in] num_expected_outputs Expected number of output edges
 */
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target : " << TargetInfo::TargetType
                                  << " ID : " << node.id()
                                  << " Name: " << node.name()
                                  << std::endl);

    // Debug-only sanity checks; compiled out of release builds
    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
}
84
85/** Creates a backend activation layer function
86 *
87 * @tparam ActivationLayerFunction Backend activation function
88 * @tparam TargetInfo Target-specific information
89 *
90 * @param[in] node Node to create the backend function for
91 *
92 * @return Backend activation layer function
93 */
94template <typename ActivationLayerFunction, typename TargetInfo>
95std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
96{
97 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
98
99 // Extract IO and info
100 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
101 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
102 const ActivationLayerInfo act_info = node.activation_info();
103
104 // Create function
105 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
106 func->configure(input, output, act_info);
107
108 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
109 << " Target " << TargetInfo::TargetType
110 << " Data Type: " << input->info()->data_type()
111 << " Shape: " << input->info()->tensor_shape()
112 << " Activation function: " << act_info.activation()
113 << " a: " << act_info.a()
114 << " b: " << act_info.b()
115 << " InPlace : " << is_in_place_operation(input, output)
116 << std::endl);
117
118 return std::move(func);
119}
120
121/** Create a backend batch normalization layer function
122 *
123 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
124 * @tparam TargetInfo Target-specific information
125 *
126 * @param[in] node Node to create the backend function for
127 *
128 * @return Backend batch normalization layer function
129 */
130template <typename BatchNormalizationLayerFunction, typename TargetInfo>
131std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
132{
133 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
134
135 // Extract IO and info
136 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
137 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
138 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
139 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
140 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
141 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
142 const float epsilon = node.epsilon();
143 const ActivationLayerInfo fused_act = node.fused_activation();
144
145 // Create and configure function
146 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
147 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
148
149 // Log info
150 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
151 << " Target " << TargetInfo::TargetType
152 << " Data Type: " << input->info()->data_type()
153 << " Shape: " << input->info()->tensor_shape()
154 << " Epsilon: " << epsilon << " "
155 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
156 << " InPlace : " << is_in_place_operation(input, output)
157 << std::endl);
158
159 return std::move(func);
160}
161
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100162/** Create a backend bounding box transform layer function
163 *
164 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
165 * @tparam TargetInfo Target-specific information
166 *
167 * @param[in] node Node to create the backend function for
168 *
169 * @return Backend bounding box transform layer function
170 */
171template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
172std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
173{
174 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
175
176 // Extract IO and info
177 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
178 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
179 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
180 const BoundingBoxTransformInfo bbox_info = node.info();
181
182 // Create and configure function
183 auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
184 func->configure(input, output, deltas, bbox_info);
185
186 // Log info
187 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
188 << " Target " << TargetInfo::TargetType
189 << " Data Type: " << input->info()->data_type()
190 << " Shape: " << input->info()->tensor_shape()
191 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
192 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
193 << std::endl);
194
195 return std::move(func);
196}
197
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100198/** Create a backend channel shuffle layer function
199 *
200 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
201 * @tparam TargetInfo Target-specific information
202 *
203 * @param[in] node Node to create the backend function for
204 *
205 * @return Backend channel shuffle layer function
206 */
207template <typename ChannelShuffleLayerFunction, typename TargetInfo>
208std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
209{
210 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
211
212 // Extract IO and info
213 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
214 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
215 const unsigned int num_groups = node.num_groups();
216
217 // Create function
218 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
219 func->configure(input, output, num_groups);
220
221 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
222 << " Target " << TargetInfo::TargetType
223 << " Data Type: " << input->info()->data_type()
224 << " Shape: " << input->info()->tensor_shape()
225 << " Num groups: " << num_groups
226 << std::endl);
227
228 return std::move(func);
229}
230
Georgios Pinitase2220552018-07-20 13:23:44 +0100231/** Create a backend layer concatenate function
232 *
233 * @tparam ConcatenateLayerFunction Backend concatenate function
234 * @tparam TargetInfo Target-specific information
235 *
236 * @param[in] node Node to create the backend function for
237 *
238 * @return Backend concatenate layer function
239 */
240template <typename ConcatenateLayerFunction, typename TargetInfo>
241std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
242{
243 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
244 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
245
246 // Return nullptr if depth concatenate is switched off
247 if(!node.is_enabled())
248 {
249 return nullptr;
250 }
251
252 // Extract IO and info
253 std::vector<typename TargetInfo::TensorType *> inputs;
254 for(unsigned int i = 0; i < node.num_inputs(); ++i)
255 {
256 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
257 }
258 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
259 const DataLayoutDimension concat_axis = node.concatenation_axis();
260
261 // Create and configure function
262 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
263 func->configure(inputs, output, concat_axis);
264
265 // Log info
266 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
267 << " Target " << TargetInfo::TargetType
268 << " Data Type: " << output->info()->data_type()
269 << " Shape: " << output->info()->tensor_shape()
270 << " Num Inputs: " << inputs.size()
271 << " Axis: " << concat_axis
272 << std::endl);
273
274 return std::move(func);
275}
276
/** Create a backend convolution layer function
 *
 * Dispatches on the node's convolution method (Winograd, Direct, GEMM or the
 * backend's generic/default path) and instantiates the matching backend function.
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Quantized path: force the bias data type to S32 before configuration
    // NOTE(review): assumes biases is non-null whenever the input is quantized — confirm callers
    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        // Winograd and Direct paths only support a single group
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        // Direct convolution is not memory managed
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        // Fallback: let the backend's generic convolution pick an implementation
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info (quantization details only when the data type is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
365
366/** Create a backend deconvolution layer function
367 *
368 * @tparam DeconvolutionLayerFunction Backend deconvolution function
369 * @tparam TargetInfo Target-specific information
370 *
371 * @param[in] node Node to create the backend function for
372 * @param[in] ctx Graph context
373 *
374 * @return Backend deconvolution layer function
375 */
376template <typename DeconvolutionLayerFunction, typename TargetInfo>
377std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
378{
379 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
380
381 // Extract IO and info
382 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
383 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
384 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
385 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
386
387 const PadStrideInfo deconv_info = node.deconvolution_info();
388 const Size2D inner_border = node.inner_border();
389
390 // Create and configure function (we assume that functions have been validated before creation)
391 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
392 std::unique_ptr<IFunction> func;
393
394 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
395 std::string(), mm,
396 input, weights, biases, output, deconv_info, inner_border.x(), inner_border.y());
397
398 // Log info
399 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
400 << " Target " << TargetInfo::TargetType
401 << " Data Type: " << input->info()->data_type()
402 << " Input shape: " << input->info()->tensor_shape()
403 << " Weights shape: " << weights->info()->tensor_shape()
404 << " Output shape: " << output->info()->tensor_shape()
405 << std::endl);
406 return func;
407}
408
/** Create a backend layer depth-wise convolution function
 *
 * Dispatches between the optimized 3x3 implementation and the generic
 * depthwise convolution based on the node's selected method.
 *
 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
 * @tparam TargetInfo                         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Quantized path: force the bias data type to S32 before configuration
    // NOTE(review): assumes biases is non-null whenever the input is quantized — confirm callers
    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo              conv_info     = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();
    // NOTE(review): depth multiplier is hard-coded to 1 here rather than read from the node — confirm intended
    const unsigned int        depth_multiplier = 1;
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;
    if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
                                        std::string("DepthwiseConvolutionLayer3x3"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }
    else
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::GenericDepthwiseConvolutionLayer>(
                                        std::string("DepthwiseConvolutionLayer"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }

    // Log info (quantization details only when the data type is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
476
477/** Create a backend element-wise operation layer function
478 *
479 * @tparam EltwiseFunctions Backend element-wise function
480 * @tparam TargetInfo Target-specific information
481 *
482 * @param[in] node Node to create the backend function for
483 *
484 * @return Backend element-wise operation layer function
485 */
486template <typename EltwiseFunctions, typename TargetInfo>
487std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
488{
489 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
490
491 // Extract IO and info
492 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
493 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
494 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
495 const EltwiseOperation eltwise_op = node.eltwise_operation();
496 const ConvertPolicy convert_policy = node.convert_policy();
497 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
498 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
499 ARM_COMPUTE_ERROR_ON(output == nullptr);
500
501 std::unique_ptr<IFunction> func = nullptr;
502 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100503 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100504 {
505 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
506 std::string("ArithmeticAddition"),
507 input1, input2, output, convert_policy);
508 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100509 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100510 {
511 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
512 std::string("ArithmeticSubtraction"),
513 input1, input2, output, convert_policy);
514 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100515 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100516 {
517 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
518 std::string("PixelWiseMultiplication"),
519 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
520 }
521 else
522 {
523 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
524 }
525
526 // Log info
527 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
528 << " Target " << TargetInfo::TargetType
529 << " Operation " << func_name
530 << " Data Type: " << input1->info()->data_type()
531 << " Shape : " << input1->info()->tensor_shape()
532 << std::endl);
533
534 return func;
535}
536
537/** Create a backend flatten layer function
538 *
539 * @tparam FlattenLayerFunction Backend flatten function
540 * @tparam TargetInfo Target-specific information
541 *
542 * @param[in] node Node to create the backend function for
543 *
544 * @return Backend flatten layer function
545 */
546template <typename FlattenLayerFunction, typename TargetInfo>
547std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
548{
549 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
550
551 // Extract IO and info
552 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
553 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
554
Georgios Pinitase2220552018-07-20 13:23:44 +0100555 ARM_COMPUTE_ERROR_ON(input == nullptr);
556 ARM_COMPUTE_ERROR_ON(output == nullptr);
557
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100558 // Create and configure function
559 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
560 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100561
562 // Log info
563 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
564 << " Target " << TargetInfo::TargetType
565 << " Data Type: " << input->info()->data_type()
566 << " Input shape: " << input->info()->tensor_shape()
567 << " Output shape: " << output->info()->tensor_shape()
568 << std::endl);
569
570 return std::move(func);
571}
572
573/** Create a backend fully connected layer function
574 *
575 * @tparam FullyConnectedLayerFunction Backend fully-connected function
576 * @tparam TargetInfo Target-specific information
577 *
578 * @param[in] node Node to create the backend function for
579 * @param[in] ctx Graph context
580 *
581 * @return Backend fully connected layer function
582 */
583template <typename FullyConnectedLayerFunction, typename TargetInfo>
584std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
585{
586 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
587
588 // Extract IO and info
589 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
590 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
591 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
592 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas7d66a8e2018-07-17 12:28:42 +0100593 const FullyConnectedLayerInfo fc_info = node.info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100594
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100595 ARM_COMPUTE_ERROR_ON(input == nullptr);
596 ARM_COMPUTE_ERROR_ON(weights == nullptr);
597 ARM_COMPUTE_ERROR_ON(output == nullptr);
598
Georgios Pinitase2220552018-07-20 13:23:44 +0100599 // Create and configure function
600 auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
601 func->configure(input, weights, biases, output, fc_info);
602
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100603 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
604
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100605 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100606 std::ostringstream qss;
607 if(is_quantized)
608 {
609 qss << " Input QuantInfo: " << input->info()->quantization_info()
610 << " Weights QuantInfo: " << weights->info()->quantization_info()
611 << " Output QuantInfo: " << output->info()->quantization_info();
612 }
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100613 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
614 << " Target " << TargetInfo::TargetType
615 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100616 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100617 << " Input shape: " << input->info()->tensor_shape()
618 << " Weights shape: " << weights->info()->tensor_shape()
619 << " Output shape: " << output->info()->tensor_shape()
620 << std::endl);
621
622 return std::move(func);
623}
624
Michele Di Giorgio47e6fed2018-11-13 12:04:25 +0000625/** Create a backend generate proposals layer function
626 *
627 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
628 * @tparam TargetInfo Target-specific information
629 *
630 * @param[in] node Node to create the backend function for
631 * @param[in] ctx Graph context
632 *
633 * @return Backend generate proposals layer function
634 */
635template <typename GenerateProposalsLayerFunction, typename TargetInfo>
636std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
637{
638 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
639
640 // Extract IO and info
641 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
642 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
643 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
644 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
645 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
646 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
647 const GenerateProposalsInfo info = node.info();
648
649 ARM_COMPUTE_ERROR_ON(scores == nullptr);
650 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
651 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
652 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
653 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
654
655 // Create and configure function
656 auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
657 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
658
659 // Log info
660 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
661 << " Target " << TargetInfo::TargetType
662 << " Data Type: " << scores->info()->data_type()
663 << " Scores shape: " << scores->info()->tensor_shape()
664 << " Deltas shape: " << deltas->info()->tensor_shape()
665 << " Anchors shape: " << anchors->info()->tensor_shape()
666 << " Proposals shape: " << proposals->info()->tensor_shape()
667 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
668 << " Scores Out shape: " << scores_out->info()->tensor_shape()
669 << std::endl);
670
671 return std::move(func);
672}
673
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100674/** Create a backend normalization layer function
675 *
676 * @tparam NormalizationLayerFunction Backend normalization function
677 * @tparam TargetInfo Target-specific information
678 *
679 * @param[in] node Node to create the backend function for
680 * @param[in] ctx Graph context
681 *
682 * @return Backend normalization layer function
683 */
684template <typename NormalizationLayerFunction, typename TargetInfo>
685std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
686{
687 ARM_COMPUTE_UNUSED(ctx);
688
689 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
690
691 // Extract IO and info
692 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
693 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
694 const NormalizationLayerInfo norm_info = node.normalization_info();
695 ARM_COMPUTE_ERROR_ON(input == nullptr);
696 ARM_COMPUTE_ERROR_ON(output == nullptr);
697
698 // Create and configure function
699 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
700 func->configure(input, output, norm_info);
701
702 // Log info
703 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
704 << " Target " << TargetInfo::TargetType
705 << " Data Type: " << input->info()->data_type()
706 << " Input shape: " << input->info()->tensor_shape()
707 << " Output shape: " << output->info()->tensor_shape()
708 << " Normalization info: " << norm_info.type()
709 << std::endl);
710
711 return std::move(func);
712}
713
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100714/** Create a backend normalize planar YUV layer function
715 *
716 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
717 * @tparam TargetInfo Target-specific information
718 *
719 * @param[in] node Node to create the backend function for
720 *
721 * @return Backend normalize plnar YUV layer function
722 */
723template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
724std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
725{
726 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
727
728 // Extract IO and info
729 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
730 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
731 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
732 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
733 ARM_COMPUTE_ERROR_ON(input == nullptr);
734 ARM_COMPUTE_ERROR_ON(mean == nullptr);
735 ARM_COMPUTE_ERROR_ON(std == nullptr);
736 ARM_COMPUTE_ERROR_ON(output == nullptr);
737
738 // Create and configure function
739 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
740 func->configure(input, output, mean, std);
741
742 // Log info
743 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
744 << " Target " << TargetInfo::TargetType
745 << " Data Type: " << input->info()->data_type()
746 << " Shape: " << input->info()->tensor_shape()
747 << std::endl);
748
749 return std::move(func);
750}
751
Michele Di Giorgio4bb17332018-09-26 13:56:51 +0100752/** Create a backend pad layer function
753 *
754 * @tparam PadLayerFunction Backend pad function
755 * @tparam TargetInfo Target-specific information
756 *
757 * @param[in] node Node to create the backend function for
758 *
759 * @return Backend pad layer function
760 */
761template <typename PadLayerFunction, typename TargetInfo>
762std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
763{
764 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
765
766 // Extract IO and info
767 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
768 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
769 const PaddingList &padding = node.padding();
770 ARM_COMPUTE_ERROR_ON(input == nullptr);
771 ARM_COMPUTE_ERROR_ON(output == nullptr);
772
773 // Create and configure function
774 auto func = support::cpp14::make_unique<PadLayerFunction>();
775 func->configure(input, output, padding);
776
777 // Log info
778 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
779 << " Target " << TargetInfo::TargetType
780 << " Data Type: " << input->info()->data_type()
781 << " Input shape: " << input->info()->tensor_shape()
782 << " Output shape: " << output->info()->tensor_shape()
783 << std::endl);
784
785 return std::move(func);
786}
787
Georgios Pinitas57c48242018-08-02 13:41:49 +0100788/** Create a backend permute layer function
789 *
790 * @tparam PermuteLayerFunction Backend permute function
791 * @tparam TargetInfo Target-specific information
792 *
793 * @param[in] node Node to create the backend function for
794 *
795 * @return Backend permute layer function
796 */
797template <typename PermuteLayerFunction, typename TargetInfo>
798std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
799{
800 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
801
802 // Extract IO and info
803 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
804 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
805 const PermutationVector &perm = node.permutation_vector();
806 ARM_COMPUTE_ERROR_ON(input == nullptr);
807 ARM_COMPUTE_ERROR_ON(output == nullptr);
808
809 // Create and configure function
810 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
811 func->configure(input, output, perm);
812
813 // Log info
814 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
815 << " Target " << TargetInfo::TargetType
816 << " Data Type: " << input->info()->data_type()
817 << " Input shape: " << input->info()->tensor_shape()
818 << " Output shape: " << output->info()->tensor_shape()
819 << " Permutation vector: " << perm
820 << std::endl);
821
822 return std::move(func);
823}
824
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100825/** Create a backend pooling layer function
826 *
827 * @tparam PoolingLayerFunction Backend pooling function
828 * @tparam TargetInfo Target-specific information
829 *
830 * @param[in] node Node to create the backend function for
831 *
832 * @return Backend pooling layer function
833 */
834template <typename PoolingLayerFunction, typename TargetInfo>
835std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
836{
837 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
838
839 // Extract IO and info
840 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
841 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
842 const PoolingLayerInfo pool_info = node.pooling_info();
843 ARM_COMPUTE_ERROR_ON(input == nullptr);
844 ARM_COMPUTE_ERROR_ON(output == nullptr);
845
846 // Create and configure function
847 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
848 func->configure(input, output, pool_info);
849
850 // Log info
851 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
852 << " Target " << TargetInfo::TargetType
853 << " Data Type: " << input->info()->data_type()
854 << " Input shape: " << input->info()->tensor_shape()
855 << " Output shape: " << output->info()->tensor_shape()
856 << " Pooling info: " << pool_info.pool_type()
857 << std::endl);
858
859 return std::move(func);
860}
861
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100862/** Create a backend reorg layer function
863 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100864 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100865 * @tparam TargetInfo Target-specific information
866 *
867 * @param[in] node Node to create the backend function for
868 *
869 * @return Backend reshape layer function
870 */
871template <typename ReorgLayerFunction, typename TargetInfo>
872std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
873{
874 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
875
876 // Extract IO and info
877 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
878 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
879 ARM_COMPUTE_ERROR_ON(input == nullptr);
880 ARM_COMPUTE_ERROR_ON(output == nullptr);
881
882 // Create and configure function
883 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
884 func->configure(input, output, node.stride());
885
886 // Log info
887 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
888 << " Target " << TargetInfo::TargetType
889 << " Data Type: " << input->info()->data_type()
890 << " Input shape: " << input->info()->tensor_shape()
891 << " Output shape: " << output->info()->tensor_shape()
892 << std::endl);
893
894 return std::move(func);
895}
896
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100897/** Create a backend reshape layer function
898 *
899 * @tparam ReshapeLayerFunction Backend reshape function
900 * @tparam TargetInfo Target-specific information
901 *
902 * @param[in] node Node to create the backend function for
903 *
904 * @return Backend reshape layer function
905 */
906template <typename ReshapeLayerFunction, typename TargetInfo>
907std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
908{
909 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
910
911 // Extract IO and info
912 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
913 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
914 ARM_COMPUTE_ERROR_ON(input == nullptr);
915 ARM_COMPUTE_ERROR_ON(output == nullptr);
916
917 // Create and configure function
918 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
919 func->configure(input, output);
920
921 // Log info
922 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
923 << " Target " << TargetInfo::TargetType
924 << " Data Type: " << input->info()->data_type()
925 << " Input shape: " << input->info()->tensor_shape()
926 << " Output shape: " << output->info()->tensor_shape()
927 << std::endl);
928
929 return std::move(func);
930}
931
932/** Create a backend resize layer function
933 *
934 * @tparam ResizeLayerFunction Backend resize function
935 * @tparam TargetInfo Target-specific information
936 *
937 * @param[in] node Node to create the backend function for
938 *
939 * @return Backend resize layer function
940 */
941template <typename ResizeLayerFunction, typename TargetInfo>
942std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
943{
944 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
945
946 // Extract IO and info
947 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
948 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
949 ARM_COMPUTE_ERROR_ON(input == nullptr);
950 ARM_COMPUTE_ERROR_ON(output == nullptr);
951 const InterpolationPolicy policy = node.policy();
952
953 // Create and configure function
954 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
955 func->configure(input, output, policy, BorderMode::CONSTANT);
956
957 // Log info
958 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
959 << " Target " << TargetInfo::TargetType
960 << " Data Type: " << input->info()->data_type()
961 << " Input shape: " << input->info()->tensor_shape()
962 << " Output shape: " << output->info()->tensor_shape()
963 << " Interpolation: " << policy
964 << std::endl);
965
966 return std::move(func);
967}
968
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100969/** Create a backend slice layer function
970 *
971 * @tparam SliceLayerFunction Backend slice function
972 * @tparam TargetInfo Target-specific information
973 *
974 * @param[in] node Node to create the backend function for
975 *
976 * @return Backend slice layer function
977 */
978template <typename SliceLayerFunction, typename TargetInfo>
979std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
980{
981 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
982
983 // Extract IO and info
984 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
985 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
986 ARM_COMPUTE_ERROR_ON(input == nullptr);
987 ARM_COMPUTE_ERROR_ON(output == nullptr);
988
989 // Create and configure function
990 auto func = support::cpp14::make_unique<SliceLayerFunction>();
991 func->configure(input, output, node.starts(), node.ends());
992
993 // Log info
994 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
995 << " Target " << TargetInfo::TargetType
996 << " Data Type: " << input->info()->data_type()
997 << " Input shape: " << input->info()->tensor_shape()
998 << " Output shape: " << output->info()->tensor_shape()
999 << std::endl);
1000
1001 return std::move(func);
1002}
1003
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001004/** Create a backend softmax layer function
1005 *
1006 * @tparam SoftmaxLayerFunction Backend softmax function
1007 * @tparam TargetInfo Target-specific information
1008 *
1009 * @param[in] node Node to create the backend function for
1010 * @param[in] ctx Graph context
1011 *
1012 * @return Backend softmax layer function
1013 */
1014template <typename SoftmaxLayerFunction, typename TargetInfo>
1015std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1016{
1017 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1018
1019 // Extract IO and info
1020 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1021 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1022 const float beta = node.beta();
1023 ARM_COMPUTE_ERROR_ON(input == nullptr);
1024 ARM_COMPUTE_ERROR_ON(output == nullptr);
1025
1026 // Create and configure function
1027 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1028 func->configure(input, output, beta);
1029
1030 // Log info
1031 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1032 << " Target " << TargetInfo::TargetType
1033 << " Data Type: " << input->info()->data_type()
1034 << " Input shape: " << input->info()->tensor_shape()
1035 << " Output shape: " << output->info()->tensor_shape()
1036 << std::endl);
1037
1038 return std::move(func);
1039}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001040/** Create a backend Upsample layer function
1041 *
1042 * @tparam UpsampleLayerFunction Backend Upsample function
1043 * @tparam TargetInfo Target-specific information
1044 *
1045 * @param[in] node Node to create the backend function for
1046 * @param[in] ctx Graph context
1047 *
1048 * @return Backend Upsample layer function
1049 */
1050template <typename UpsampleLayerFunction, typename TargetInfo>
1051std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1052{
1053 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1054
1055 // Extract IO and info
1056 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1057 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1058 const Size2D info = node.info();
1059 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1060 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1061 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1062 ARM_COMPUTE_ERROR_ON(input == nullptr);
1063 ARM_COMPUTE_ERROR_ON(output == nullptr);
1064
1065 // Create and configure function
1066 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1067 func->configure(input, output, info, upsampling_policy);
1068
1069 // Log info
1070 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1071 << " Target " << TargetInfo::TargetType
1072 << " Data Type: " << input->info()->data_type()
1073 << " Input shape: " << input->info()->tensor_shape()
1074 << " Output shape: " << output->info()->tensor_shape()
1075 << " Strides: " << info
1076 << " Upsampling policy: " << upsampling_policy
1077 << std::endl);
1078
1079 return std::move(func);
1080}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001081/** Create a backend YOLO layer function
1082 *
1083 * @tparam YoloLayerFunction Backend YOLO function
1084 * @tparam TargetInfo Target-specific information
1085 *
1086 * @param[in] node Node to create the backend function for
1087 * @param[in] ctx Graph context
1088 *
1089 * @return Backend YOLO layer function
1090 */
1091template <typename YOLOlayerFunction, typename TargetInfo>
1092std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1093{
1094 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1095
1096 // Extract IO and info
1097 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1098 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1099 const ActivationLayerInfo act_info = node.activation_info();
1100 const int32_t num_classes = node.num_classes();
1101 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1102 ARM_COMPUTE_ERROR_ON(input == nullptr);
1103 ARM_COMPUTE_ERROR_ON(output == nullptr);
1104
1105 // Create and configure function
1106 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1107 func->configure(input, output, act_info, num_classes);
1108
1109 // Log info
1110 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1111 << " Target " << TargetInfo::TargetType
1112 << " Data Type: " << input->info()->data_type()
1113 << " Input shape: " << input->info()->tensor_shape()
1114 << " Output shape: " << output->info()->tensor_shape()
1115 << " Activation function: " << act_info.activation()
1116 << " Num classes: " << num_classes
1117 << std::endl);
1118
1119 return std::move(func);
1120}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001121} // namespace detail
1122} // namespace backends
1123} // namespace graph
1124} // namespace arm_compute
1125
1126#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */