/*
2 * Copyright (c) 2018 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
31#include "arm_compute/graph/backends/Utils.h"
32#include "arm_compute/graph/nodes/Nodes.h"
33
34#include "arm_compute/core/Error.h"
35#include "arm_compute/core/Helpers.h"
36#include "arm_compute/core/ITensorInfo.h"
37#include "arm_compute/core/utils/misc/Cast.h"
38
39namespace arm_compute
40{
41namespace graph
42{
43namespace backends
44{
45namespace detail
46{
47/** Returns backing tensor of a given tensor
48 *
49 * @tparam TargetInfo Target information
50 *
51 * @param[in] tensor Tensor to extract the backing tensor from
52 *
53 * @return Backing tensor if present else nullptr
54 */
55template <typename TargetInfo>
56typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
57{
58 typename TargetInfo::TensorType *backing_tensor = nullptr;
59 if(tensor != nullptr)
60 {
61 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
62 // Get backing tensor handle
63 ITensorHandle *tensor_handle = tensor->handle();
64 // Get backing tensor
65 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
66 }
67
68 return backing_tensor;
69}
70
/** Validates a node before a backend function is created for it
 *
 * Logs the node that is about to be created and asserts (when Compute Library
 * assertions are enabled) that the node was assigned to the expected target and
 * exposes the expected number of input and output edges.
 *
 * @tparam TargetInfo Target-specific information
 *
 * @param[in] node                 Node to validate
 * @param[in] num_expected_inputs  Expected number of input edges
 * @param[in] num_expected_outputs Expected number of output edges
 */
template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    // Assigned target must match the backend this helper is instantiated for
    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
}
84
85/** Creates a backend activation layer function
86 *
87 * @tparam ActivationLayerFunction Backend activation function
88 * @tparam TargetInfo Target-specific information
89 *
90 * @param[in] node Node to create the backend function for
91 *
92 * @return Backend activation layer function
93 */
94template <typename ActivationLayerFunction, typename TargetInfo>
95std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
96{
97 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
98
99 // Extract IO and info
100 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
101 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
102 const ActivationLayerInfo act_info = node.activation_info();
103
104 // Create function
105 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
106 func->configure(input, output, act_info);
107
Pablo Tello32521432018-11-15 14:43:10 +0000108 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
109 << node.name()
110 << " Type: " << node.type()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100111 << " Target " << TargetInfo::TargetType
112 << " Data Type: " << input->info()->data_type()
113 << " Shape: " << input->info()->tensor_shape()
114 << " Activation function: " << act_info.activation()
115 << " a: " << act_info.a()
116 << " b: " << act_info.b()
117 << " InPlace : " << is_in_place_operation(input, output)
118 << std::endl);
119
120 return std::move(func);
121}
122
123/** Create a backend batch normalization layer function
124 *
125 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
126 * @tparam TargetInfo Target-specific information
127 *
128 * @param[in] node Node to create the backend function for
129 *
130 * @return Backend batch normalization layer function
131 */
132template <typename BatchNormalizationLayerFunction, typename TargetInfo>
133std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
134{
135 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
136
137 // Extract IO and info
138 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
139 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
140 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
141 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
142 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
143 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
144 const float epsilon = node.epsilon();
145 const ActivationLayerInfo fused_act = node.fused_activation();
146
147 // Create and configure function
148 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
149 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
150
151 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000152 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
153 << node.name()
154 << " Type: " << node.type()
155 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100156 << " Data Type: " << input->info()->data_type()
157 << " Shape: " << input->info()->tensor_shape()
158 << " Epsilon: " << epsilon << " "
159 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
Pablo Tello32521432018-11-15 14:43:10 +0000160 << " InPlace: " << is_in_place_operation(input, output)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100161 << std::endl);
162
163 return std::move(func);
164}
165
Manuel Bottinid2048ce2018-10-23 17:00:42 +0100166/** Create a backend bounding box transform layer function
167 *
168 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
169 * @tparam TargetInfo Target-specific information
170 *
171 * @param[in] node Node to create the backend function for
172 *
173 * @return Backend bounding box transform layer function
174 */
175template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
176std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
177{
178 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
179
180 // Extract IO and info
181 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
182 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
183 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
184 const BoundingBoxTransformInfo bbox_info = node.info();
185
186 // Create and configure function
187 auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
188 func->configure(input, output, deltas, bbox_info);
189
190 // Log info
191 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
192 << " Target " << TargetInfo::TargetType
193 << " Data Type: " << input->info()->data_type()
194 << " Shape: " << input->info()->tensor_shape()
195 << " BoundingBox Info img W: " << bbox_info.img_width() << " "
196 << " BoundingBox Info img H: " << bbox_info.img_height() << " "
197 << std::endl);
198
199 return std::move(func);
200}
201
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100202/** Create a backend channel shuffle layer function
203 *
204 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
205 * @tparam TargetInfo Target-specific information
206 *
207 * @param[in] node Node to create the backend function for
208 *
209 * @return Backend channel shuffle layer function
210 */
211template <typename ChannelShuffleLayerFunction, typename TargetInfo>
212std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
213{
214 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
215
216 // Extract IO and info
217 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
218 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
219 const unsigned int num_groups = node.num_groups();
220
221 // Create function
222 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
223 func->configure(input, output, num_groups);
224
Pablo Tello32521432018-11-15 14:43:10 +0000225 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
226 << node.name()
227 << " Type: " << node.type()
228 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100229 << " Data Type: " << input->info()->data_type()
230 << " Shape: " << input->info()->tensor_shape()
231 << " Num groups: " << num_groups
232 << std::endl);
233
234 return std::move(func);
235}
236
Georgios Pinitase2220552018-07-20 13:23:44 +0100237/** Create a backend layer concatenate function
238 *
239 * @tparam ConcatenateLayerFunction Backend concatenate function
240 * @tparam TargetInfo Target-specific information
241 *
242 * @param[in] node Node to create the backend function for
243 *
244 * @return Backend concatenate layer function
245 */
246template <typename ConcatenateLayerFunction, typename TargetInfo>
247std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
248{
249 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
250 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
251
252 // Return nullptr if depth concatenate is switched off
253 if(!node.is_enabled())
254 {
255 return nullptr;
256 }
257
258 // Extract IO and info
259 std::vector<typename TargetInfo::TensorType *> inputs;
260 for(unsigned int i = 0; i < node.num_inputs(); ++i)
261 {
262 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
263 }
264 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
265 const DataLayoutDimension concat_axis = node.concatenation_axis();
266
267 // Create and configure function
268 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
269 func->configure(inputs, output, concat_axis);
270
271 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000272 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
273 << node.name()
274 << " Type: " << node.type()
275 << " Target: " << TargetInfo::TargetType
Georgios Pinitase2220552018-07-20 13:23:44 +0100276 << " Data Type: " << output->info()->data_type()
277 << " Shape: " << output->info()->tensor_shape()
278 << " Num Inputs: " << inputs.size()
279 << " Axis: " << concat_axis
280 << std::endl);
281
282 return std::move(func);
283}
284
/** Create a backend convolution layer function
 *
 * Dispatches on the node's convolution method: Winograd, Direct, GEMM or a
 * generic fallback. Winograd and Direct reject grouped convolution; GEMM and
 * the generic path forward the group count to the backend function.
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // NOTE: mutates the bias tensor info in place — quantized paths carry
        // biases as S32 (presumably matching the backend's 32-bit accumulators;
        // confirm against the backend kernel implementations)
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        // Direct convolution does not take a memory manager
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        // Fallback: let the backend pick the implementation
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
375
376/** Create a backend deconvolution layer function
377 *
378 * @tparam DeconvolutionLayerFunction Backend deconvolution function
379 * @tparam TargetInfo Target-specific information
380 *
381 * @param[in] node Node to create the backend function for
382 * @param[in] ctx Graph context
383 *
384 * @return Backend deconvolution layer function
385 */
386template <typename DeconvolutionLayerFunction, typename TargetInfo>
387std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
388{
389 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
390
391 // Extract IO and info
392 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
393 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
394 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
395 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
396
397 const PadStrideInfo deconv_info = node.deconvolution_info();
398 const Size2D inner_border = node.inner_border();
399
400 // Create and configure function (we assume that functions have been validated before creation)
401 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
402 std::unique_ptr<IFunction> func;
403
404 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
405 std::string(), mm,
406 input, weights, biases, output, deconv_info, inner_border.x(), inner_border.y());
407
408 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000409 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
410 << node.name()
411 << " Type: " << node.type()
412 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100413 << " Data Type: " << input->info()->data_type()
414 << " Input shape: " << input->info()->tensor_shape()
415 << " Weights shape: " << weights->info()->tensor_shape()
416 << " Output shape: " << output->info()->tensor_shape()
417 << std::endl);
418 return func;
419}
420
/** Create a backend layer depth-wise convolution function
 *
 * Dispatches between the optimized 3x3 implementation and the generic
 * depth-wise convolution depending on the node's selected method.
 *
 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
 * @tparam TargetInfo                         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // NOTE: mutates the bias tensor info in place — quantized paths carry
        // biases as S32 (same convention as create_convolution_layer)
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo              conv_info        = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm    = node.depthwise_convolution_method();
    const unsigned int               depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo        fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;
    if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
                                        std::string("DepthwiseConvolutionLayer3x3"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }
    else
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::GenericDepthwiseConvolutionLayer>(
                                        std::string("DepthwiseConvolutionLayer"),
                                        input, weights, biases, output, conv_info, depth_multiplier, fused_act);
    }

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return func;
}
491
Isabella Gottardi7234ed82018-11-27 08:51:10 +0000492/** Create a backend detection output layer function
493 *
494 * @tparam DetectionOutputLayer Function Backend detection output function
495 * @tparam TargetInfo Target-specific information
496 *
497 * @param[in] node Node to create the backend function for
498 *
499 * @return Backend detection output layer function
500 */
501template <typename DetectionOutputLayerFunction, typename TargetInfo>
502std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
503{
504 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
505
506 // Extract IO and info
507 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
508 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
509 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(2));
510 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
511 const DetectionOutputLayerInfo detect_info = node.detection_output_info();
512
513 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
514 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
515 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
516 ARM_COMPUTE_ERROR_ON(output == nullptr);
517
518 // Create and configure function
519 auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
520 func->configure(input0, input1, input2, output, detect_info);
521
522 // Log info
523 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
524 << node.name()
525 << " Type: " << node.type()
526 << " Target: " << TargetInfo::TargetType
527 << " Data Type: " << input0->info()->data_type()
528 << " Input0 shape: " << input0->info()->tensor_shape()
529 << " Input1 shape: " << input1->info()->tensor_shape()
530 << " Input2 shape: " << input2->info()->tensor_shape()
531 << " Output shape: " << output->info()->tensor_shape()
532 << " DetectionOutputLayer info: " << detect_info
533 << std::endl);
534
535 return std::move(func);
536}
/** Create a backend element-wise operation layer function
 *
 * Supported operations are Add, Sub and Mul; any other operation hits
 * ARM_COMPUTE_ERROR.
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Dispatch on the requested element-wise operation
    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
                                        std::string("ArithmeticAddition"),
                                        input1, input2, output, convert_policy);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
                                        std::string("ArithmeticSubtraction"),
                                        input1, input2, output, convert_policy);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        // Multiplication additionally takes a scale (fixed to 1.f here) and the node's rounding policy
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
                                        std::string("PixelWiseMultiplication"),
                                        input1, input2, output, 1.f, convert_policy, node.rounding_policy());
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return func;
}
598
599/** Create a backend flatten layer function
600 *
601 * @tparam FlattenLayerFunction Backend flatten function
602 * @tparam TargetInfo Target-specific information
603 *
604 * @param[in] node Node to create the backend function for
605 *
606 * @return Backend flatten layer function
607 */
608template <typename FlattenLayerFunction, typename TargetInfo>
609std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
610{
611 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
612
613 // Extract IO and info
614 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
615 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
616
Georgios Pinitase2220552018-07-20 13:23:44 +0100617 ARM_COMPUTE_ERROR_ON(input == nullptr);
618 ARM_COMPUTE_ERROR_ON(output == nullptr);
619
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100620 // Create and configure function
621 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
622 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100623
624 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000625 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
626 << node.name()
627 << " Type: " << node.type()
628 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100629 << " Data Type: " << input->info()->data_type()
630 << " Input shape: " << input->info()->tensor_shape()
631 << " Output shape: " << output->info()->tensor_shape()
632 << std::endl);
633
634 return std::move(func);
635}
636
/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    // Biases may legitimately be absent; only input, weights and output are required
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function (the function takes the context's memory manager at construction)
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
690
Michele Di Giorgio47e6fed2018-11-13 12:04:25 +0000691/** Create a backend generate proposals layer function
692 *
693 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
694 * @tparam TargetInfo Target-specific information
695 *
696 * @param[in] node Node to create the backend function for
697 * @param[in] ctx Graph context
698 *
699 * @return Backend generate proposals layer function
700 */
701template <typename GenerateProposalsLayerFunction, typename TargetInfo>
702std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
703{
704 validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);
705
706 // Extract IO and info
707 typename TargetInfo::TensorType *scores = get_backing_tensor<TargetInfo>(node.input(0));
708 typename TargetInfo::TensorType *deltas = get_backing_tensor<TargetInfo>(node.input(1));
709 typename TargetInfo::TensorType *anchors = get_backing_tensor<TargetInfo>(node.input(2));
710 typename TargetInfo::TensorType *proposals = get_backing_tensor<TargetInfo>(node.output(0));
711 typename TargetInfo::TensorType *scores_out = get_backing_tensor<TargetInfo>(node.output(1));
712 typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
713 const GenerateProposalsInfo info = node.info();
714
715 ARM_COMPUTE_ERROR_ON(scores == nullptr);
716 ARM_COMPUTE_ERROR_ON(deltas == nullptr);
717 ARM_COMPUTE_ERROR_ON(anchors == nullptr);
718 ARM_COMPUTE_ERROR_ON(proposals == nullptr);
719 ARM_COMPUTE_ERROR_ON(scores_out == nullptr);
720
721 // Create and configure function
722 auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
723 func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);
724
725 // Log info
726 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
727 << " Target " << TargetInfo::TargetType
728 << " Data Type: " << scores->info()->data_type()
729 << " Scores shape: " << scores->info()->tensor_shape()
730 << " Deltas shape: " << deltas->info()->tensor_shape()
731 << " Anchors shape: " << anchors->info()->tensor_shape()
732 << " Proposals shape: " << proposals->info()->tensor_shape()
733 << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
734 << " Scores Out shape: " << scores_out->info()->tensor_shape()
735 << std::endl);
736
737 return std::move(func);
738}
739
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100740/** Create a backend normalization layer function
741 *
742 * @tparam NormalizationLayerFunction Backend normalization function
743 * @tparam TargetInfo Target-specific information
744 *
745 * @param[in] node Node to create the backend function for
746 * @param[in] ctx Graph context
747 *
748 * @return Backend normalization layer function
749 */
750template <typename NormalizationLayerFunction, typename TargetInfo>
751std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
752{
753 ARM_COMPUTE_UNUSED(ctx);
754
755 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
756
757 // Extract IO and info
758 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
759 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
760 const NormalizationLayerInfo norm_info = node.normalization_info();
761 ARM_COMPUTE_ERROR_ON(input == nullptr);
762 ARM_COMPUTE_ERROR_ON(output == nullptr);
763
764 // Create and configure function
765 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
766 func->configure(input, output, norm_info);
767
768 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000769 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
770 << node.name()
771 << " Type: " << node.type()
772 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100773 << " Data Type: " << input->info()->data_type()
774 << " Input shape: " << input->info()->tensor_shape()
775 << " Output shape: " << output->info()->tensor_shape()
776 << " Normalization info: " << norm_info.type()
777 << std::endl);
778
779 return std::move(func);
780}
781
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100782/** Create a backend normalize planar YUV layer function
783 *
784 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
785 * @tparam TargetInfo Target-specific information
786 *
787 * @param[in] node Node to create the backend function for
788 *
 * @return Backend normalize planar YUV layer function
790 */
791template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
792std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
793{
794 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
795
796 // Extract IO and info
797 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
798 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
799 typename TargetInfo::TensorType *std = get_backing_tensor<TargetInfo>(node.input(2));
800 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
801 ARM_COMPUTE_ERROR_ON(input == nullptr);
802 ARM_COMPUTE_ERROR_ON(mean == nullptr);
803 ARM_COMPUTE_ERROR_ON(std == nullptr);
804 ARM_COMPUTE_ERROR_ON(output == nullptr);
805
806 // Create and configure function
807 auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
808 func->configure(input, output, mean, std);
809
810 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000811 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
812 << node.name()
813 << " Type: " << node.type()
814 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio555d1102018-09-12 13:51:59 +0100815 << " Data Type: " << input->info()->data_type()
816 << " Shape: " << input->info()->tensor_shape()
817 << std::endl);
818
819 return std::move(func);
820}
821
Michele Di Giorgio4bb17332018-09-26 13:56:51 +0100822/** Create a backend pad layer function
823 *
824 * @tparam PadLayerFunction Backend pad function
825 * @tparam TargetInfo Target-specific information
826 *
827 * @param[in] node Node to create the backend function for
828 *
829 * @return Backend pad layer function
830 */
831template <typename PadLayerFunction, typename TargetInfo>
832std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
833{
834 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
835
836 // Extract IO and info
837 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
838 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
839 const PaddingList &padding = node.padding();
840 ARM_COMPUTE_ERROR_ON(input == nullptr);
841 ARM_COMPUTE_ERROR_ON(output == nullptr);
842
843 // Create and configure function
844 auto func = support::cpp14::make_unique<PadLayerFunction>();
845 func->configure(input, output, padding);
846
847 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000848 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
849 << node.name()
850 << " Type: " << node.type()
851 << " Target: " << TargetInfo::TargetType
Michele Di Giorgio4bb17332018-09-26 13:56:51 +0100852 << " Data Type: " << input->info()->data_type()
853 << " Input shape: " << input->info()->tensor_shape()
854 << " Output shape: " << output->info()->tensor_shape()
855 << std::endl);
856
857 return std::move(func);
858}
859
Georgios Pinitas57c48242018-08-02 13:41:49 +0100860/** Create a backend permute layer function
861 *
862 * @tparam PermuteLayerFunction Backend permute function
863 * @tparam TargetInfo Target-specific information
864 *
865 * @param[in] node Node to create the backend function for
866 *
867 * @return Backend permute layer function
868 */
869template <typename PermuteLayerFunction, typename TargetInfo>
870std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
871{
872 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
873
874 // Extract IO and info
875 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
876 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
877 const PermutationVector &perm = node.permutation_vector();
878 ARM_COMPUTE_ERROR_ON(input == nullptr);
879 ARM_COMPUTE_ERROR_ON(output == nullptr);
880
881 // Create and configure function
882 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
883 func->configure(input, output, perm);
884
885 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000886 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
887 << node.name()
888 << " Type: " << node.type()
889 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +0100890 << " Data Type: " << input->info()->data_type()
891 << " Input shape: " << input->info()->tensor_shape()
892 << " Output shape: " << output->info()->tensor_shape()
893 << " Permutation vector: " << perm
894 << std::endl);
895
896 return std::move(func);
897}
898
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100899/** Create a backend pooling layer function
900 *
901 * @tparam PoolingLayerFunction Backend pooling function
902 * @tparam TargetInfo Target-specific information
903 *
904 * @param[in] node Node to create the backend function for
905 *
906 * @return Backend pooling layer function
907 */
908template <typename PoolingLayerFunction, typename TargetInfo>
909std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
910{
911 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
912
913 // Extract IO and info
914 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
915 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
916 const PoolingLayerInfo pool_info = node.pooling_info();
917 ARM_COMPUTE_ERROR_ON(input == nullptr);
918 ARM_COMPUTE_ERROR_ON(output == nullptr);
919
920 // Create and configure function
921 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
922 func->configure(input, output, pool_info);
923
924 // Log info
Pablo Tello32521432018-11-15 14:43:10 +0000925 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
926 << node.name()
927 << " Type: " << node.type()
928 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100929 << " Data Type: " << input->info()->data_type()
930 << " Input shape: " << input->info()->tensor_shape()
931 << " Output shape: " << output->info()->tensor_shape()
932 << " Pooling info: " << pool_info.pool_type()
933 << std::endl);
934
935 return std::move(func);
936}
937
Pablo Tello32521432018-11-15 14:43:10 +0000938/** Create a backend priorbox layer function
939 *
940 * @tparam PriorBoxLayerFunction Backend priorbox function
941 * @tparam TargetInfo Target-specific information
942 *
943 * @param[in] node Node to create the backend function for
944 *
945 * @return Backend priorbox layer function
946 */
947template <typename PriorBoxLayerFunction, typename TargetInfo>
948std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
949{
950 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
951
952 // Extract IO and info
953 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
954 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
955 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
956 const PriorBoxLayerInfo prior_info = node.priorbox_info();
957 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
958 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
959 ARM_COMPUTE_ERROR_ON(output == nullptr);
960
961 // Create and configure function
962 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
963 func->configure(input0, input1, output, prior_info);
964
965 // Log info
966 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
967 << node.name()
968 << " Type: " << node.type()
969 << " Target: " << TargetInfo::TargetType
970 << " Data Type: " << input0->info()->data_type()
971 << " Input0 shape: " << input0->info()->tensor_shape()
972 << " Input1 shape: " << input1->info()->tensor_shape()
973 << " Output shape: " << output->info()->tensor_shape()
974 << " PriorBoxLayer info: " << prior_info
975 << std::endl);
976
977 return std::move(func);
978}
979
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100980/** Create a backend reorg layer function
981 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +0100982 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +0100983 * @tparam TargetInfo Target-specific information
984 *
985 * @param[in] node Node to create the backend function for
986 *
 * @return Backend reorg layer function
988 */
989template <typename ReorgLayerFunction, typename TargetInfo>
990std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
991{
992 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
993
994 // Extract IO and info
995 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
996 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
997 ARM_COMPUTE_ERROR_ON(input == nullptr);
998 ARM_COMPUTE_ERROR_ON(output == nullptr);
999
1000 // Create and configure function
1001 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1002 func->configure(input, output, node.stride());
1003
1004 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001005 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1006 << node.name()
1007 << " Type: " << node.type()
1008 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001009 << " Data Type: " << input->info()->data_type()
1010 << " Input shape: " << input->info()->tensor_shape()
1011 << " Output shape: " << output->info()->tensor_shape()
1012 << std::endl);
1013
1014 return std::move(func);
1015}
1016
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001017/** Create a backend reshape layer function
1018 *
1019 * @tparam ReshapeLayerFunction Backend reshape function
1020 * @tparam TargetInfo Target-specific information
1021 *
1022 * @param[in] node Node to create the backend function for
1023 *
1024 * @return Backend reshape layer function
1025 */
1026template <typename ReshapeLayerFunction, typename TargetInfo>
1027std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1028{
1029 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1030
1031 // Extract IO and info
1032 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1033 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1034 ARM_COMPUTE_ERROR_ON(input == nullptr);
1035 ARM_COMPUTE_ERROR_ON(output == nullptr);
1036
1037 // Create and configure function
1038 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1039 func->configure(input, output);
1040
1041 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001042 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1043 << node.name()
1044 << " Type: " << node.type()
1045 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001046 << " Data Type: " << input->info()->data_type()
1047 << " Input shape: " << input->info()->tensor_shape()
1048 << " Output shape: " << output->info()->tensor_shape()
1049 << std::endl);
1050
1051 return std::move(func);
1052}
1053
1054/** Create a backend resize layer function
1055 *
1056 * @tparam ResizeLayerFunction Backend resize function
1057 * @tparam TargetInfo Target-specific information
1058 *
1059 * @param[in] node Node to create the backend function for
1060 *
1061 * @return Backend resize layer function
1062 */
1063template <typename ResizeLayerFunction, typename TargetInfo>
1064std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1065{
1066 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1067
1068 // Extract IO and info
1069 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1070 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1071 ARM_COMPUTE_ERROR_ON(input == nullptr);
1072 ARM_COMPUTE_ERROR_ON(output == nullptr);
1073 const InterpolationPolicy policy = node.policy();
1074
1075 // Create and configure function
1076 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
1077 func->configure(input, output, policy, BorderMode::CONSTANT);
1078
1079 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001080 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1081 << node.name()
1082 << " Type: " << node.type()
1083 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001084 << " Data Type: " << input->info()->data_type()
1085 << " Input shape: " << input->info()->tensor_shape()
1086 << " Output shape: " << output->info()->tensor_shape()
1087 << " Interpolation: " << policy
1088 << std::endl);
1089
1090 return std::move(func);
1091}
1092
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001093/** Create a backend ROI align layer function
1094 *
1095 * @tparam ROIAlignLayerFunction ROI Align function
1096 * @tparam TargetInfo Target-specific information
1097 *
1098 * @param[in] node Node to create the backend function for
1099 *
1100 * @return ROI Align layer function
1101 */
1102template <typename ROIAlignLayerFunction, typename TargetInfo>
1103std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1104{
1105 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1106
1107 // Extract IO and info
1108 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1109 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1110 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1111 ARM_COMPUTE_ERROR_ON(input == nullptr);
1112 ARM_COMPUTE_ERROR_ON(output == nullptr);
1113 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1114
1115 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1116
1117 // Create and configure function
1118 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1119
1120 func->configure(input, rois, output, pool_info);
1121
1122 // Log info
1123 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
1124 << " Target " << TargetInfo::TargetType
1125 << " Data Type: " << input->info()->data_type()
1126 << " Input shape: " << input->info()->tensor_shape()
1127 << " Output shape: " << output->info()->tensor_shape()
1128 << " ROIs shape: " << rois->info()->tensor_shape()
1129 << " ROIPooling width: " << pool_info.pooled_width()
1130 << " ROIPooling height: " << pool_info.pooled_height()
1131 << std::endl);
1132
1133 return std::move(func);
1134}
1135
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001136/** Create a backend slice layer function
1137 *
1138 * @tparam SliceLayerFunction Backend slice function
1139 * @tparam TargetInfo Target-specific information
1140 *
1141 * @param[in] node Node to create the backend function for
1142 *
1143 * @return Backend slice layer function
1144 */
1145template <typename SliceLayerFunction, typename TargetInfo>
1146std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1147{
1148 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1149
1150 // Extract IO and info
1151 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1152 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1153 ARM_COMPUTE_ERROR_ON(input == nullptr);
1154 ARM_COMPUTE_ERROR_ON(output == nullptr);
1155
1156 // Create and configure function
1157 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1158 func->configure(input, output, node.starts(), node.ends());
1159
1160 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001161 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1162 << node.name()
1163 << " Type: " << node.type()
1164 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001165 << " Data Type: " << input->info()->data_type()
1166 << " Input shape: " << input->info()->tensor_shape()
1167 << " Output shape: " << output->info()->tensor_shape()
1168 << std::endl);
1169
1170 return std::move(func);
1171}
1172
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001173/** Create a backend softmax layer function
1174 *
1175 * @tparam SoftmaxLayerFunction Backend softmax function
1176 * @tparam TargetInfo Target-specific information
1177 *
1178 * @param[in] node Node to create the backend function for
1179 * @param[in] ctx Graph context
1180 *
1181 * @return Backend softmax layer function
1182 */
1183template <typename SoftmaxLayerFunction, typename TargetInfo>
1184std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1185{
1186 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1187
1188 // Extract IO and info
1189 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1190 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1191 const float beta = node.beta();
1192 ARM_COMPUTE_ERROR_ON(input == nullptr);
1193 ARM_COMPUTE_ERROR_ON(output == nullptr);
1194
1195 // Create and configure function
1196 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1197 func->configure(input, output, beta);
1198
1199 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001200 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1201 << node.name()
1202 << " Type: " << node.type()
1203 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001204 << " Data Type: " << input->info()->data_type()
1205 << " Input shape: " << input->info()->tensor_shape()
1206 << " Output shape: " << output->info()->tensor_shape()
1207 << std::endl);
1208
1209 return std::move(func);
1210}
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001211/** Create a backend Upsample layer function
1212 *
1213 * @tparam UpsampleLayerFunction Backend Upsample function
1214 * @tparam TargetInfo Target-specific information
1215 *
1216 * @param[in] node Node to create the backend function for
1217 * @param[in] ctx Graph context
1218 *
1219 * @return Backend Upsample layer function
1220 */
1221template <typename UpsampleLayerFunction, typename TargetInfo>
1222std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1223{
1224 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1225
1226 // Extract IO and info
1227 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1228 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1229 const Size2D info = node.info();
1230 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1231 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1232 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1233 ARM_COMPUTE_ERROR_ON(input == nullptr);
1234 ARM_COMPUTE_ERROR_ON(output == nullptr);
1235
1236 // Create and configure function
1237 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1238 func->configure(input, output, info, upsampling_policy);
1239
1240 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001241 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1242 << node.name()
1243 << " Type: " << node.type()
1244 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001245 << " Data Type: " << input->info()->data_type()
1246 << " Input shape: " << input->info()->tensor_shape()
1247 << " Output shape: " << output->info()->tensor_shape()
1248 << " Strides: " << info
1249 << " Upsampling policy: " << upsampling_policy
1250 << std::endl);
1251
1252 return std::move(func);
1253}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001254/** Create a backend YOLO layer function
1255 *
 * @tparam YOLOlayerFunction Backend YOLO function
1257 * @tparam TargetInfo Target-specific information
1258 *
1259 * @param[in] node Node to create the backend function for
1260 * @param[in] ctx Graph context
1261 *
1262 * @return Backend YOLO layer function
1263 */
1264template <typename YOLOlayerFunction, typename TargetInfo>
1265std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1266{
1267 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1268
1269 // Extract IO and info
1270 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1271 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1272 const ActivationLayerInfo act_info = node.activation_info();
1273 const int32_t num_classes = node.num_classes();
1274 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1275 ARM_COMPUTE_ERROR_ON(input == nullptr);
1276 ARM_COMPUTE_ERROR_ON(output == nullptr);
1277
1278 // Create and configure function
1279 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1280 func->configure(input, output, act_info, num_classes);
1281
1282 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001283 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1284 << node.name()
1285 << " Type: " << node.type()
1286 << " Target: " << TargetInfo::TargetType
Michalis Spyrou96f67692018-09-13 11:39:28 +01001287 << " Data Type: " << input->info()->data_type()
1288 << " Input shape: " << input->info()->tensor_shape()
1289 << " Output shape: " << output->info()->tensor_shape()
1290 << " Activation function: " << act_info.activation()
1291 << " Num classes: " << num_classes
1292 << std::endl);
1293
1294 return std::move(func);
1295}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001296} // namespace detail
1297} // namespace backends
1298} // namespace graph
1299} // namespace arm_compute
1300
1301#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */