/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H
#define ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H

#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/Tensor.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/Utils.h"
#include "arm_compute/graph/backends/FusedConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/FusedDepthwiseConvolutionBatchNormalizationFunction.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/utils/misc/Cast.h"

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace detail
{
// Address rule DR-9R5 (1579. Return by converting move constructor)
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5))
#define RETURN_UNIQUE_PTR(x) (x)
#else /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */
#define RETURN_UNIQUE_PTR(x) (std::move(x))
#endif /* defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5)) */

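// Note on the macro above: DR 1579 ("return by converting move constructor") lets the
// compiler move from a local on "return func;" even when the returned std::unique_ptr
// needs a conversion (e.g. std::unique_ptr<SomeConcreteFunction> to std::unique_ptr<IFunction>).
// Older GCC releases predate that resolution, so RETURN_UNIQUE_PTR falls back to an explicit
// std::move there and is a no-op elsewhere.
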
/** Returns backing tensor of a given tensor
 *
 * @tparam TargetInfo Target information
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
template <typename TargetInfo>
typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    typename TargetInfo::TensorType *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}

template <typename TargetInfo>
void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
                                  << " Target: " << TargetInfo::TargetType
                                  << " ID: " << node.id()
                                  << node.name()
                                  << std::endl);

    ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
    ARM_COMPUTE_UNUSED(node, num_expected_inputs, num_expected_outputs);
}

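// The helpers below are parameterised on a TargetInfo trait supplied by each backend. A
// minimal sketch of what such a trait is assumed to provide (names are hypothetical; the
// real traits live with the concrete backends, and some helpers additionally expect a
// SrcTensorType alias, e.g. create_concatenate_layer):
//
//   struct MyTargetInfo
//   {
//       using TensorType = arm_compute::ITensor;            // backing tensor type handled by the backend
//       static constexpr Target TargetType = Target::NEON;  // graph target the backend serves
//   };
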
/** Creates a backend activation layer function
 *
 * @tparam ActivationLayerFunction Backend activation function
 * @tparam TargetInfo              Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
template <typename ActivationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input    = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output   = get_backing_tensor<TargetInfo>(node.output(0));
    const ActivationLayerInfo        act_info = node.activation_info();

    // Create function
    auto func = support::cpp14::make_unique<ActivationLayerFunction>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace : " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

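// Usage sketch (illustrative; MyActivationLayer and MyTargetInfo are hypothetical
// placeholders for a concrete backend's activation function and TargetInfo trait):
//
//   case NodeType::ActivationLayer:
//       return detail::create_activation_layer<MyActivationLayer, MyTargetInfo>(
//           *arm_compute::utils::cast::polymorphic_downcast<ActivationLayerNode *>(node));
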
/** Create a backend batch normalization layer function
 *
 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
template <typename BatchNormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean  = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *var   = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *beta  = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));

    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const float                      epsilon   = node.epsilon();
    const ActivationLayerInfo        fused_act = node.fused_activation();

    // Create and configure function
    auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace: " << is_in_place_operation(input, output)
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fused convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_convolution_batch_normalization_layer(FusedConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info  = node.convolution_info();
    const unsigned int        num_groups = node.num_groups();
    const bool                fast_math  = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act  = node.fused_activation();
    const float               epsilon    = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, num_groups, fast_math, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

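// Illustrative note: FusedLayerTypes is assumed to be a small trait bundling the backend
// functions that the fused wrapper composes (the convolution flavours plus a fused
// batch-normalization stage). A hypothetical instantiation from a backend factory:
//
//   return detail::create_fused_convolution_batch_normalization_layer<MyFusedLayerTypes, MyTargetInfo>(
//       *arm_compute::utils::cast::polymorphic_downcast<FusedConvolutionBatchNormalizationNode *>(node), ctx);
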
/** Create a backend fused depthwise convolution batch normalization layer function
 *
 * @tparam FusedLayerTypes Fused layer types
 * @tparam TargetInfo      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fused depthwise convolution batch normalization layer function
 */
template <typename FusedLayerTypes, typename TargetInfo>
std::unique_ptr<IFunction> create_fused_depthwise_convolution_batch_normalization_layer(FusedDepthwiseConvolutionBatchNormalizationNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 7 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *mean    = get_backing_tensor<TargetInfo>(node.input(3));
    typename TargetInfo::TensorType *var     = get_backing_tensor<TargetInfo>(node.input(4));
    typename TargetInfo::TensorType *beta    = get_backing_tensor<TargetInfo>(node.input(5));
    typename TargetInfo::TensorType *gamma   = get_backing_tensor<TargetInfo>(node.input(6));

    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();
    const float               epsilon          = node.epsilon();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    using FType = FusedDepthwiseConvolutionBatchNormalizationFunction<TargetInfo, FusedLayerTypes>;

    // Create and configure function
    std::tie(func, func_name) = create_named_memory_managed_function<FType>(
        std::string("FusedDepthwiseConvolutionBatchNormalizationLayer"), mm, input, weights, biases, output, mean, var, beta, gamma, epsilon, conv_info, depth_multiplier, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend bounding box transform layer function
 *
 * @tparam BoundingBoxTransformLayerFunction Backend bounding box transform function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend bounding box transform layer function
 */
template <typename BoundingBoxTransformLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_bounding_box_transform_layer(BoundingBoxTransformLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas    = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const BoundingBoxTransformInfo   bbox_info = node.info();

    // Create and configure function
    auto func = support::cpp14::make_unique<BoundingBoxTransformLayerFunction>();
    func->configure(input, output, deltas, bbox_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " BoundingBox Info img W: " << bbox_info.img_width() << " "
                               << " BoundingBox Info img H: " << bbox_info.img_height() << " "
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend channel shuffle layer function
 *
 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend channel shuffle layer function
 */
template <typename ChannelShuffleLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const unsigned int               num_groups = node.num_groups();

    // Create function
    auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
    func->configure(input, output, num_groups);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Num groups: " << num_groups
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend layer concatenate function
 *
 * @tparam ConcatenateLayerFunction Backend concatenate function
 * @tparam TargetInfo               Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend concatenate layer function
 */
template <typename ConcatenateLayerFunction, typename TargetInfo>
std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<typename TargetInfo::SrcTensorType *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
    }
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DataLayout                 data_layout = node.output(0) != nullptr ? node.output(0)->desc().layout : DataLayout::UNKNOWN;
    const size_t                     concat_axis = get_dimension_idx(data_layout, node.concatenation_axis());

    // Create and configure function
    auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
    func->configure(inputs, output, concat_axis);

    // Log info
    const bool         is_quantized = is_data_type_quantized_asymmetric(output->info()->data_type());
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << " Axis: " << concat_axis
                               << qss.str()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

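// Illustrative note: the node stores the concatenation axis as a layout-agnostic
// DataLayoutDimension, and get_dimension_idx() maps it to a concrete tensor dimension for
// the output's layout. For example, concatenating along CHANNEL resolves to dimension 2 in
// an NCHW output but dimension 0 in an NHWC output (assuming the usual WHCN ordering of
// arm_compute tensor shape dimensions).
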
/** Create a backend convolution layer function
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info      = node.convolution_info();
    const unsigned int        num_groups     = node.num_groups();
    const ConvolutionMethod   conv_algorithm = node.convolution_method();
    const bool                fast_math      = node.fast_math_hint() == FastMathHint::Enabled;
    const ActivationLayerInfo fused_act      = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
            std::string("WinogradConvolutionLayer"), mm,
            input, weights, biases, output, conv_info, fused_act, fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
            std::string("DirectConvolutionLayer"),
            input, weights, biases, output, conv_info, fused_act);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
            std::string("GEMMConvolutionLayer"), mm,
            input, weights, biases, output, conv_info,
            WeightsInfo(), Size2D(1U, 1U), fused_act, num_groups);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
            std::string("GenericConvolutionLayer"), mm,
            input, weights, biases, output, conv_info,
            WeightsInfo(), Size2D(1U, 1U), fused_act, fast_math, num_groups);
    }

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

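// Illustrative note: ConvolutionLayerFunctions is assumed to be a trait that names one
// backend function per convolution method the dispatch above can select. A hypothetical
// trait (the member names match the ones used above; the bound types are placeholders):
//
//   struct MyConvolutionLayerFunctions
//   {
//       using GenericConvolutionLayer  = MyConvolutionLayer;          // fallback / Default
//       using GEMMConvolutionLayer     = MyGEMMConvolutionLayer;      // ConvolutionMethod::GEMM
//       using DirectConvolutionLayer   = MyDirectConvolutionLayer;    // ConvolutionMethod::Direct
//       using WinogradConvolutionLayer = MyWinogradConvolutionLayer;  // ConvolutionMethod::Winograd
//   };
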
/** Create a backend deconvolution layer function
 *
 * @tparam DeconvolutionLayerFunction Backend deconvolution function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend deconvolution layer function
 */
template <typename DeconvolutionLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const PadStrideInfo deconv_info = node.deconvolution_info();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;

    std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
        std::string(), mm,
        input, weights, biases, output, deconv_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend layer depth-wise convolution function
 *
 * @tparam DepthwiseConvolutionLayer Backend depthwise convolution function
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayer, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo       conv_info        = node.convolution_info();
    const unsigned int        depth_multiplier = node.depth_multiplier();
    const ActivationLayerInfo fused_act        = node.fused_activation();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;

    std::tie(func, func_name) = create_named_function<DepthwiseConvolutionLayer>(
        std::string("DepthwiseConvolutionLayer"),
        input, weights, biases, output, conv_info, depth_multiplier, fused_act);

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << func_name
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Depth multiplier: " << depth_multiplier
                               << qss.str()
                               << (fused_act.enabled() ? " " + to_string(fused_act.activation()) : "")
                               << std::endl);
    return RETURN_UNIQUE_PTR(func);
}

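// Usage sketch (hypothetical backend factory code; the concrete function and TargetInfo
// types are placeholders):
//
//   case NodeType::DepthwiseConvolutionLayer:
//       return detail::create_depthwise_convolution_layer<MyDepthwiseConvolutionLayer, MyTargetInfo>(
//           *arm_compute::utils::cast::polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
//
// Note that for quantized inputs the helper forces the bias data type to S32, matching the
// 32-bit integer accumulators that quantized depthwise kernels add the bias into.
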
/** Create a backend dequantize layer function
 *
 * @tparam DequantizationLayerFunction Backend dequantize function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend dequantize layer function
 */
template <typename DequantizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_dequantization_layer(DequantizationLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DequantizationLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Input quantization info: " << input->info()->quantization_info()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}
/** Create a backend detection output layer function
 *
 * @tparam DetectionOutputLayerFunction Backend detection output function
 * @tparam TargetInfo                   Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection output layer function
 */
template <typename DetectionOutputLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_output_layer(DetectionOutputLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output      = get_backing_tensor<TargetInfo>(node.output(0));
    const DetectionOutputLayerInfo   detect_info = node.detection_output_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionOutputLayerFunction>();
    func->configure(input0, input1, input2, output, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " DetectionOutputLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend detection post process layer function
 *
 * @tparam DetectionPostProcessLayerFunction Backend detection post process function
 * @tparam TargetInfo                        Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend detection post process layer function
 */
template <typename DetectionPostProcessLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_detection_post_process_layer(DetectionPostProcessLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 4 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType     *input0      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType     *input1      = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType     *input2      = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType     *output0     = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType     *output1     = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType     *output2     = get_backing_tensor<TargetInfo>(node.output(2));
    typename TargetInfo::TensorType     *output3     = get_backing_tensor<TargetInfo>(node.output(3));
    const DetectionPostProcessLayerInfo  detect_info = node.detection_post_process_info();

    ARM_COMPUTE_ERROR_ON(input0 == nullptr);
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output0 == nullptr);
    ARM_COMPUTE_ERROR_ON(output1 == nullptr);
    ARM_COMPUTE_ERROR_ON(output2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output3 == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<DetectionPostProcessLayerFunction>();
    func->configure(input0, input1, input2, output0, output1, output2, output3, detect_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input0->info()->data_type()
                               << " Input0 shape: " << input0->info()->tensor_shape()
                               << " Input1 shape: " << input1->info()->tensor_shape()
                               << " Input2 shape: " << input2->info()->tensor_shape()
                               << " Output0 shape: " << output0->info()->tensor_shape()
                               << " Output1 shape: " << output1->info()->tensor_shape()
                               << " Output2 shape: " << output2->info()->tensor_shape()
                               << " Output3 shape: " << output3->info()->tensor_shape()
                               << " DetectionPostProcessLayer info: " << detect_info
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend element-wise operation layer function
 *
 * @tparam EltwiseFunctions Backend element-wise function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
template <typename EltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input1         = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *input2         = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *output         = get_backing_tensor<TargetInfo>(node.output(0));
    const EltwiseOperation           eltwise_op     = node.eltwise_operation();
    const ConvertPolicy              convert_policy = node.convert_policy();
    const ActivationLayerInfo        act_info       = node.fused_activation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == EltwiseOperation::Add)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
            std::string("ArithmeticAddition"),
            input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Sub)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
            std::string("ArithmeticSubtraction"),
            input1, input2, output, convert_policy, act_info);
    }
    else if(eltwise_op == EltwiseOperation::Mul)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
            std::string("PixelWiseMultiplication"),
            input1, input2, output, 1.f, convert_policy, node.rounding_policy(), act_info);
    }
    else if(eltwise_op == EltwiseOperation::Max)
    {
        std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Maximum>(
            std::string("ElementwiseMaximum"),
            input1, input2, output, act_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape: " << input1->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

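// Illustrative note: EltwiseFunctions is assumed to be a trait naming one backend function
// per supported element-wise operation, matching the members used in the dispatch above.
// A hypothetical trait (the bound types are placeholders):
//
//   struct MyEltwiseFunctions
//   {
//       using Addition       = MyArithmeticAddition;       // EltwiseOperation::Add
//       using Subtraction    = MyArithmeticSubtraction;    // EltwiseOperation::Sub
//       using Multiplication = MyPixelWiseMultiplication;  // EltwiseOperation::Mul
//       using Maximum        = MyElementwiseMax;           // EltwiseOperation::Max
//   };
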
/** Create a backend unary element-wise operation layer function
 *
 * @tparam UnaryEltwiseFunctions Backend unary element-wise function
 * @tparam TargetInfo            Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend unary element-wise operation layer function
 */
template <typename UnaryEltwiseFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_unary_eltwise_layer(UnaryEltwiseLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input      = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output     = get_backing_tensor<TargetInfo>(node.output(0));
    const UnaryEltwiseOperation      eltwise_op = node.eltwise_descriptor().op;

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string                func_name;
    if(eltwise_op == UnaryEltwiseOperation::Exp)
    {
        std::tie(func, func_name) = create_named_function<typename UnaryEltwiseFunctions::Exp>(
            std::string("Exp"),
            input, output);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported unary element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Operation: " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend flatten layer function
 *
 * @tparam FlattenLayerFunction Backend flatten function
 * @tparam TargetInfo           Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend flatten layer function
 */
template <typename FlattenLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<FlattenLayerFunction>();
    func->configure(input, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend fully connected layer function
 *
 * @tparam FullyConnectedLayerFunction Backend fully-connected function
 * @tparam TargetInfo                  Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
template <typename FullyConnectedLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));
    const FullyConnectedLayerInfo    fc_info = node.info();

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto wm   = get_weights_manager(ctx, TargetInfo::TargetType);
    auto mm   = get_memory_manager(ctx, TargetInfo::TargetType);
    auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(mm, wm.get());
    func->configure(input, weights, biases, output, fc_info);

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    // Log info
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

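// Illustrative note: unlike most creators above, the fully connected helper wires both a
// memory manager and a weights manager from the GraphContext into the backend function, so
// transient buffers and reshaped weights can be shared across functions. A hypothetical
// call from a backend factory:
//
//   case NodeType::FullyConnectedLayer:
//       return detail::create_fully_connected_layer<MyFullyConnectedLayer, MyTargetInfo>(
//           *arm_compute::utils::cast::polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
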
/** Create a backend generate proposals layer function
 *
 * @tparam GenerateProposalsLayerFunction Backend generate proposals function
 * @tparam TargetInfo                     Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend generate proposals layer function
 */
template <typename GenerateProposalsLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_generate_proposals_layer(GenerateProposalsLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 3 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *scores              = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *deltas              = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *anchors             = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *proposals           = get_backing_tensor<TargetInfo>(node.output(0));
    typename TargetInfo::TensorType *scores_out          = get_backing_tensor<TargetInfo>(node.output(1));
    typename TargetInfo::TensorType *num_valid_proposals = get_backing_tensor<TargetInfo>(node.output(2));
    const GenerateProposalsInfo      info                = node.info();

    ARM_COMPUTE_ERROR_ON(scores == nullptr);
    ARM_COMPUTE_ERROR_ON(deltas == nullptr);
    ARM_COMPUTE_ERROR_ON(anchors == nullptr);
    ARM_COMPUTE_ERROR_ON(proposals == nullptr);
    ARM_COMPUTE_ERROR_ON(scores_out == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GenerateProposalsLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
    func->configure(scores, deltas, anchors, proposals, scores_out, num_valid_proposals, info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << scores->info()->data_type()
                               << " Scores shape: " << scores->info()->tensor_shape()
                               << " Deltas shape: " << deltas->info()->tensor_shape()
                               << " Anchors shape: " << anchors->info()->tensor_shape()
                               << " Proposals shape: " << proposals->info()->tensor_shape()
                               << " Num valid proposals shape: " << num_valid_proposals->info()->tensor_shape()
                               << " Scores Out shape: " << scores_out->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalization layer function
 *
 * @tparam NormalizationLayerFunction Backend normalization function
 * @tparam TargetInfo                 Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend normalization layer function
 */
template <typename NormalizationLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_UNUSED(ctx);

    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const NormalizationLayerInfo     norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend normalize planar YUV layer function
 *
 * @tparam NormalizePlanarYUVLayerFunction Backend normalize planar YUV function
 * @tparam TargetInfo                      Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalize planar YUV layer function
 */
template <typename NormalizePlanarYUVLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_normalize_planar_yuv_layer(NormalizePlanarYUVLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input  = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *mean   = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *std    = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(mean == nullptr);
    ARM_COMPUTE_ERROR_ON(std == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<NormalizePlanarYUVLayerFunction>();
    func->configure(input, output, mean, std);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

/** Create a backend pad layer function
 *
 * @tparam PadLayerFunction Backend pad function
 * @tparam TargetInfo       Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pad layer function
 */
template <typename PadLayerFunction, typename TargetInfo>
std::unique_ptr<IFunction> create_pad_layer(PadLayerNode &node)
{
    validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input     = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *output    = get_backing_tensor<TargetInfo>(node.output(0));
    const PaddingList               &padding   = node.padding();
    const PixelValue                 pad_value = node.pad_value();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<PadLayerFunction>();
    func->configure(input, output, padding, pad_value);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
                               << node.name()
                               << " Type: " << node.type()
                               << " Target: " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return RETURN_UNIQUE_PTR(func);
}

Georgios Pinitas57c48242018-08-02 13:41:49 +01001137/** Create a backend permute layer function
1138 *
1139 * @tparam PermuteLayerFunction Backend permute function
1140 * @tparam TargetInfo Target-specific information
1141 *
1142 * @param[in] node Node to create the backend function for
1143 *
1144 * @return Backend permute layer function
1145 */
1146template <typename PermuteLayerFunction, typename TargetInfo>
1147std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
1148{
1149 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1150
1151 // Extract IO and info
1152 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1153 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1154 const PermutationVector &perm = node.permutation_vector();
1155 ARM_COMPUTE_ERROR_ON(input == nullptr);
1156 ARM_COMPUTE_ERROR_ON(output == nullptr);
1157
1158 // Create and configure function
1159 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
1160 func->configure(input, output, perm);
1161
1162 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001163 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1164 << node.name()
1165 << " Type: " << node.type()
1166 << " Target: " << TargetInfo::TargetType
Georgios Pinitas57c48242018-08-02 13:41:49 +01001167 << " Data Type: " << input->info()->data_type()
1168 << " Input shape: " << input->info()->tensor_shape()
1169 << " Output shape: " << output->info()->tensor_shape()
1170 << " Permutation vector: " << perm
1171 << std::endl);
1172
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001173 return RETURN_UNIQUE_PTR(func);
Georgios Pinitas57c48242018-08-02 13:41:49 +01001174}
1175
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001176/** Create a backend pooling layer function
1177 *
1178 * @tparam PoolingLayerFunction Backend pooling function
1179 * @tparam TargetInfo Target-specific information
1180 *
1181 * @param[in] node Node to create the backend function for
1182 *
1183 * @return Backend pooling layer function
1184 */
1185template <typename PoolingLayerFunction, typename TargetInfo>
1186std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
1187{
1188 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1189
1190 // Extract IO and info
1191 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1192 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1193 const PoolingLayerInfo pool_info = node.pooling_info();
1194 ARM_COMPUTE_ERROR_ON(input == nullptr);
1195 ARM_COMPUTE_ERROR_ON(output == nullptr);
1196
1197 // Create and configure function
1198 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
1199 func->configure(input, output, pool_info);
1200
1201 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001202 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1203 << node.name()
1204 << " Type: " << node.type()
1205 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001206 << " Data Type: " << input->info()->data_type()
1207 << " Input shape: " << input->info()->tensor_shape()
1208 << " Output shape: " << output->info()->tensor_shape()
Sang-Hoon Park0cb3da62020-01-15 12:39:56 +00001209 << " Pooling info: " << pool_info.pool_type
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001210 << std::endl);
1211
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001212 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001213}
1214
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001215/** Create a backend PRelu layer function
1216 *
1217 * @tparam PReluFunction Backend PRelu function
1218 * @tparam TargetInfo Target-specific information
1219 *
1220 * @param[in] node Node to create the backend function for
1221 *
1222 * @return Backend PRelu layer function
1223 */
1224template <typename PReluFunction, typename TargetInfo>
1225std::unique_ptr<IFunction> create_prelu_layer(PReluLayerNode &node)
1226{
1227 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1228
1229 // Extract IO and info
1230 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1231 typename TargetInfo::TensorType *alpha = get_backing_tensor<TargetInfo>(node.input(1));
1232 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1233 ARM_COMPUTE_ERROR_ON(input == nullptr || alpha == nullptr);
1234 ARM_COMPUTE_ERROR_ON(output == nullptr);
1235
1236 // Create and configure function
1237 auto func = support::cpp14::make_unique<PReluFunction>();
1238 func->configure(input, alpha, output);
1239
1240 // Log info
1241 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1242 << node.name()
1243 << " Type: " << node.type()
1244 << " Target: " << TargetInfo::TargetType
1245 << " Data Type: " << input->info()->data_type()
1246 << " Input shape: " << input->info()->tensor_shape()
1247 << " Output shape: " << output->info()->tensor_shape()
1248 << std::endl);
1249
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001250 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasf8c47492020-02-04 17:39:59 +00001251}
1252
Giorgio Arena6e9d0e02020-01-03 15:02:04 +00001253/** Create a backend print layer function
1254 *
1255 * @tparam TargetInfo Target-specific information
1256 *
1257 * @param[in] node Node to create the backend function for
1258 *
1259 * @return Backend print layer function
1260 */
1261template <typename TargetInfo>
1262std::unique_ptr<IFunction> create_print_layer(PrintLayerNode &node)
1263{
1264 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1265
1266 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1267 ARM_COMPUTE_ERROR_ON(input == nullptr);
1268 ARM_COMPUTE_UNUSED(input);
1269
1270 // Log info
1271 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1272 << node.name()
1273 << " Type: " << node.type()
1274 << " Target: " << TargetInfo::TargetType
1275 << " Data Type: " << input->info()->data_type()
1276 << " Input shape: " << input->info()->tensor_shape()
1277 << std::endl);
1278
1279 return nullptr;
1280}
1281
Pablo Tello32521432018-11-15 14:43:10 +00001282/** Create a backend priorbox layer function
1283 *
1284 * @tparam PriorBoxLayerFunction Backend priorbox function
1285 * @tparam TargetInfo Target-specific information
1286 *
1287 * @param[in] node Node to create the backend function for
1288 *
1289 * @return Backend priorbox layer function
1290 */
1291template <typename PriorBoxLayerFunction, typename TargetInfo>
1292std::unique_ptr<IFunction> create_priorbox_layer(PriorBoxLayerNode &node)
1293{
1294 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1295
1296 // Extract IO and info
1297 typename TargetInfo::TensorType *input0 = get_backing_tensor<TargetInfo>(node.input(0));
1298 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(1));
1299 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1300 const PriorBoxLayerInfo prior_info = node.priorbox_info();
1301 ARM_COMPUTE_ERROR_ON(input0 == nullptr);
1302 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
1303 ARM_COMPUTE_ERROR_ON(output == nullptr);
1304
1305 // Create and configure function
1306 auto func = support::cpp14::make_unique<PriorBoxLayerFunction>();
1307 func->configure(input0, input1, output, prior_info);
1308
1309 // Log info
1310 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1311 << node.name()
1312 << " Type: " << node.type()
1313 << " Target: " << TargetInfo::TargetType
1314 << " Data Type: " << input0->info()->data_type()
1315 << " Input0 shape: " << input0->info()->tensor_shape()
1316 << " Input1 shape: " << input1->info()->tensor_shape()
1317 << " Output shape: " << output->info()->tensor_shape()
1318 << " PriorBoxLayer info: " << prior_info
1319 << std::endl);
1320
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001321 return RETURN_UNIQUE_PTR(func);
Pablo Tello32521432018-11-15 14:43:10 +00001322}
1323
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001324/** Create a backend quantization layer function
1325 *
1326 * @tparam QuantizationLayerFunction Backend quantization function
1327 * @tparam TargetInfo Target-specific information
1328 *
1329 * @param[in] node Node to create the backend function for
1330 *
1331 * @return Backend quantization layer function
1332 */
1333template <typename QuantizationLayerFunction, typename TargetInfo>
1334std::unique_ptr<IFunction> create_quantization_layer(QuantizationLayerNode &node)
1335{
1336 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1337
1338 // Extract IO and info
1339 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1340 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1341 ARM_COMPUTE_ERROR_ON(input == nullptr);
1342 ARM_COMPUTE_ERROR_ON(output == nullptr);
1343
1344 // Create and configure function
1345 auto func = support::cpp14::make_unique<QuantizationLayerFunction>();
1346 func->configure(input, output);
1347
1348 // Log info
1349 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1350 << node.name()
1351 << " Type: " << node.type()
1352 << " Target: " << TargetInfo::TargetType
1353 << " Data Type: " << input->info()->data_type()
1354 << " Input shape: " << input->info()->tensor_shape()
1355 << " Output shape: " << output->info()->tensor_shape()
1356 << std::endl);
1357
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001358 return RETURN_UNIQUE_PTR(func);
Isabella Gottardi3db1ba92019-05-17 12:35:20 +01001359}
1360
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001361/** Create a backend reorg layer function
1362 *
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001363 * @tparam ReorgLayerFunction Backend reorg function
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001364 * @tparam TargetInfo Target-specific information
1365 *
1366 * @param[in] node Node to create the backend function for
1367 *
1368 * @return Backend reorg layer function
1369 */
1370template <typename ReorgLayerFunction, typename TargetInfo>
1371std::unique_ptr<IFunction> create_reorg_layer(ReorgLayerNode &node)
1372{
1373 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1374
1375 // Extract IO and info
1376 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1377 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1378 ARM_COMPUTE_ERROR_ON(input == nullptr);
1379 ARM_COMPUTE_ERROR_ON(output == nullptr);
1380
1381 // Create and configure function
1382 auto func = support::cpp14::make_unique<ReorgLayerFunction>();
1383 func->configure(input, output, node.stride());
1384
1385 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001386 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1387 << node.name()
1388 << " Type: " << node.type()
1389 << " Target: " << TargetInfo::TargetType
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001390 << " Data Type: " << input->info()->data_type()
1391 << " Input shape: " << input->info()->tensor_shape()
1392 << " Output shape: " << output->info()->tensor_shape()
1393 << std::endl);
1394
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001395 return RETURN_UNIQUE_PTR(func);
Gian Marco Iodice23e24792018-09-07 15:32:14 +01001396}
1397
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001398/** Create a backend reshape layer function
1399 *
1400 * @tparam ReshapeLayerFunction Backend reshape function
1401 * @tparam TargetInfo Target-specific information
1402 *
1403 * @param[in] node Node to create the backend function for
1404 *
1405 * @return Backend reshape layer function
1406 */
1407template <typename ReshapeLayerFunction, typename TargetInfo>
1408std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
1409{
1410 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1411
1412 // Extract IO and info
1413 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1414 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1415 ARM_COMPUTE_ERROR_ON(input == nullptr);
1416 ARM_COMPUTE_ERROR_ON(output == nullptr);
1417
1418 // Create and configure function
1419 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
1420 func->configure(input, output);
1421
1422 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001423 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1424 << node.name()
1425 << " Type: " << node.type()
1426 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001427 << " Data Type: " << input->info()->data_type()
1428 << " Input shape: " << input->info()->tensor_shape()
1429 << " Output shape: " << output->info()->tensor_shape()
1430 << std::endl);
1431
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001432 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001433}
1434
1435/** Create a backend resize layer function
1436 *
1437 * @tparam ResizeLayerFunction Backend resize function
1438 * @tparam TargetInfo Target-specific information
1439 *
1440 * @param[in] node Node to create the backend function for
1441 *
1442 * @return Backend resize layer function
1443 */
1444template <typename ResizeLayerFunction, typename TargetInfo>
1445std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
1446{
1447 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1448
1449 // Extract IO and info
1450 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1451 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1452 ARM_COMPUTE_ERROR_ON(input == nullptr);
1453 ARM_COMPUTE_ERROR_ON(output == nullptr);
1454 const InterpolationPolicy policy = node.policy();
1455
1456 // Create and configure function
1457 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
Sang-Hoon Parkccd94962020-06-09 12:09:24 +01001458 func->configure(input, output, ScaleKernelInfo{ policy, BorderMode::CONSTANT });
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001459
1460 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001461 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1462 << node.name()
1463 << " Type: " << node.type()
1464 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001465 << " Data Type: " << input->info()->data_type()
1466 << " Input shape: " << input->info()->tensor_shape()
1467 << " Output shape: " << output->info()->tensor_shape()
1468 << " Interpolation: " << policy
1469 << std::endl);
1470
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001471 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001472}
1473
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001474/** Create a backend ROI align layer function
1475 *
1476 * @tparam ROIAlignLayerFunction ROI Align function
1477 * @tparam TargetInfo Target-specific information
1478 *
1479 * @param[in] node Node to create the backend function for
1480 *
1481 * @return ROI Align layer function
1482 */
1483template <typename ROIAlignLayerFunction, typename TargetInfo>
1484std::unique_ptr<IFunction> create_roi_align_layer(ROIAlignLayerNode &node)
1485{
1486 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
1487
1488 // Extract IO and info
1489 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1490 typename TargetInfo::TensorType *rois = get_backing_tensor<TargetInfo>(node.input(1));
1491 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1492 ARM_COMPUTE_ERROR_ON(input == nullptr);
1493 ARM_COMPUTE_ERROR_ON(output == nullptr);
1494 ARM_COMPUTE_ERROR_ON(rois == nullptr);
1495
1496 const ROIPoolingLayerInfo pool_info = node.pooling_info();
1497
1498 // Create and configure function
1499 auto func = support::cpp14::make_unique<ROIAlignLayerFunction>();
1500
1501 func->configure(input, rois, output, pool_info);
1502
1503 // Log info
Isabella Gottardi0ae5de92019-03-14 10:32:11 +00001504 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1505 << node.name()
1506 << " Type: " << node.type()
1507 << " Target: " << TargetInfo::TargetType
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001508 << " Data Type: " << input->info()->data_type()
1509 << " Input shape: " << input->info()->tensor_shape()
1510 << " Output shape: " << output->info()->tensor_shape()
1511 << " ROIs shape: " << rois->info()->tensor_shape()
1512 << " ROIPooling width: " << pool_info.pooled_width()
1513 << " ROIPooling height: " << pool_info.pooled_height()
1514 << std::endl);
1515
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001516 return RETURN_UNIQUE_PTR(func);
Manuel Bottini3f9d4d72018-10-19 14:04:42 +01001517}
1518
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001519/** Create a backend slice layer function
1520 *
1521 * @tparam SliceLayerFunction Backend slice function
1522 * @tparam TargetInfo Target-specific information
1523 *
1524 * @param[in] node Node to create the backend function for
1525 *
1526 * @return Backend slice layer function
1527 */
1528template <typename SliceLayerFunction, typename TargetInfo>
1529std::unique_ptr<IFunction> create_slice_layer(SliceLayerNode &node)
1530{
1531 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1532
1533 // Extract IO and info
1534 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1535 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1536 ARM_COMPUTE_ERROR_ON(input == nullptr);
1537 ARM_COMPUTE_ERROR_ON(output == nullptr);
1538
1539 // Create and configure function
1540 auto func = support::cpp14::make_unique<SliceLayerFunction>();
1541 func->configure(input, output, node.starts(), node.ends());
1542
1543 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001544 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1545 << node.name()
1546 << " Type: " << node.type()
1547 << " Target: " << TargetInfo::TargetType
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001548 << " Data Type: " << input->info()->data_type()
1549 << " Input shape: " << input->info()->tensor_shape()
1550 << " Output shape: " << output->info()->tensor_shape()
1551 << std::endl);
1552
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001553 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgioc30b6682018-09-12 17:44:08 +01001554}
1555
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001556/** Create a backend softmax layer function
1557 *
1558 * @tparam SoftmaxLayerFunction Backend softmax function
1559 * @tparam TargetInfo Target-specific information
1560 *
1561 * @param[in] node Node to create the backend function for
1562 * @param[in] ctx Graph context
1563 *
1564 * @return Backend softmax layer function
1565 */
1566template <typename SoftmaxLayerFunction, typename TargetInfo>
1567std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
1568{
1569 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1570
1571 // Extract IO and info
1572 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1573 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1574 const float beta = node.beta();
1575 ARM_COMPUTE_ERROR_ON(input == nullptr);
1576 ARM_COMPUTE_ERROR_ON(output == nullptr);
1577
1578 // Create and configure function
1579 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
1580 func->configure(input, output, beta);
1581
1582 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001583 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1584 << node.name()
1585 << " Type: " << node.type()
1586 << " Target: " << TargetInfo::TargetType
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001587 << " Data Type: " << input->info()->data_type()
1588 << " Input shape: " << input->info()->tensor_shape()
1589 << " Output shape: " << output->info()->tensor_shape()
1590 << std::endl);
1591
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001592 return RETURN_UNIQUE_PTR(func);
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001593}
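
// Note (informational): helpers that accept a GraphContext, such as the softmax one
// above, look up the memory manager registered for the backend via
// get_memory_manager(ctx, TargetInfo::TargetType) and hand it to the wrapped function
// so that its internal tensors can take part in graph-wide memory management. When no
// memory-management context is registered for that target, the lookup yields a null
// manager and the function allocates its internal tensors on its own. A hedged sketch
// of the pattern used above:
//
//   std::shared_ptr<arm_compute::IMemoryManager> mm =
//       get_memory_manager(ctx, TargetInfo::TargetType);
//   auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(mm); // mm may be null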
Michele Di Giorgioec699752019-03-22 15:25:32 +00001594
1595/** Create a backend stack layer function
1596 *
1597 * @tparam StackLayerFunction Backend stack function
1598 * @tparam TargetInfo Target-specific information
1599 *
1600 * @param[in] node Node to create the backend function for
1601 *
1602 * @return Backend stack layer function
1603 */
1604template <typename StackLayerFunction, typename TargetInfo>
1605std::unique_ptr<arm_compute::IFunction> create_stack_layer(StackLayerNode &node)
1606{
1607 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Stack node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
1608 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
1609
1610 // Extract IO and info
1611 std::vector<typename TargetInfo::TensorType *> inputs;
1612 for(unsigned int i = 0; i < node.num_inputs(); ++i)
1613 {
1614 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
1615 }
1616 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1617 const int axis = node.axis();
1618
1619 // Create and configure function
1620 auto func = support::cpp14::make_unique<StackLayerFunction>();
1621 func->configure(inputs, axis, output);
1622
1623 // Log info
1624 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1625 << node.name()
1626 << " Type: " << node.type()
1627 << " Target: " << TargetInfo::TargetType
1628 << " Data Type: " << output->info()->data_type()
1629 << " Inputs shape: " << inputs[0]->info()->tensor_shape()
1630 << " Output shape: " << output->info()->tensor_shape()
1631 << " Num Inputs: " << inputs.size()
1632 << " Axis: " << axis
1633 << std::endl);
1634
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001635 return RETURN_UNIQUE_PTR(func);
Michele Di Giorgioec699752019-03-22 15:25:32 +00001636}
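
// Usage sketch (illustrative only): nodes with a variable number of inputs, such as the
// stack node above, still go through the same single-call pattern; helpers that take no
// GraphContext are simply called without one. A CL-based factory might do something like:
//
//   std::unique_ptr<IFunction> func =
//       detail::create_stack_layer<CLStackLayer, CLTargetInfo>(
//           *polymorphic_downcast<StackLayerNode *>(node));
//
// CLStackLayer and CLTargetInfo are the CL backend's types; this is not prescriptive.
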
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001637/** Create a backend Upsample layer function
1638 *
1639 * @tparam UpsampleLayerFunction Backend Upsample function
1640 * @tparam TargetInfo Target-specific information
1641 *
1642 * @param[in] node Node to create the backend function for
1643 * @param[in] ctx Graph context
1644 *
1645 * @return Backend Upsample layer function
1646 */
1647template <typename UpsampleLayerFunction, typename TargetInfo>
1648std::unique_ptr<IFunction> create_upsample_layer(UpsampleLayerNode &node, GraphContext &ctx)
1649{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001650 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001651 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1652
1653 // Extract IO and info
1654 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1655 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1656 const Size2D info = node.info();
1657 const InterpolationPolicy upsampling_policy = node.upsampling_policy();
1658 ARM_COMPUTE_ERROR_ON(upsampling_policy != InterpolationPolicy::NEAREST_NEIGHBOR);
1659 ARM_COMPUTE_ERROR_ON(info.x() != 2 || info.y() != 2);
1660 ARM_COMPUTE_ERROR_ON(input == nullptr);
1661 ARM_COMPUTE_ERROR_ON(output == nullptr);
1662
1663 // Create and configure function
1664 auto func = support::cpp14::make_unique<UpsampleLayerFunction>();
1665 func->configure(input, output, info, upsampling_policy);
1666
1667 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001668 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1669 << node.name()
1670 << " Type: " << node.type()
1671 << " Target: " << TargetInfo::TargetType
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001672 << " Data Type: " << input->info()->data_type()
1673 << " Input shape: " << input->info()->tensor_shape()
1674 << " Output shape: " << output->info()->tensor_shape()
1675 << " Strides: " << info
1676 << " Upsampling policy: " << upsampling_policy
1677 << std::endl);
1678
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001679 return RETURN_UNIQUE_PTR(func);
Michalis Spyrou4e1c3f32018-09-20 17:14:03 +01001680}
Michalis Spyrou96f67692018-09-13 11:39:28 +01001681/** Create a backend YOLO layer function
1682 *
1683 * @tparam YOLOlayerFunction Backend YOLO function
1684 * @tparam TargetInfo Target-specific information
1685 *
1686 * @param[in] node Node to create the backend function for
1687 * @param[in] ctx Graph context
1688 *
1689 * @return Backend YOLO layer function
1690 */
1691template <typename YOLOlayerFunction, typename TargetInfo>
1692std::unique_ptr<IFunction> create_yolo_layer(YOLOLayerNode &node, GraphContext &ctx)
1693{
Michalis Spyrou6bff1952019-10-02 17:22:11 +01001694 ARM_COMPUTE_UNUSED(ctx);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001695 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
1696
1697 // Extract IO and info
1698 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
1699 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
1700 const ActivationLayerInfo act_info = node.activation_info();
1701 const int32_t num_classes = node.num_classes();
1702 ARM_COMPUTE_ERROR_ON(num_classes <= 0);
1703 ARM_COMPUTE_ERROR_ON(input == nullptr);
1704 ARM_COMPUTE_ERROR_ON(output == nullptr);
1705
1706 // Create and configure function
1707 auto func = support::cpp14::make_unique<YOLOlayerFunction>();
1708 func->configure(input, output, act_info, num_classes);
1709
1710 // Log info
Pablo Tello32521432018-11-15 14:43:10 +00001711 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated "
1712 << node.name()
1713 << " Type: " << node.type()
1714 << " Target: " << TargetInfo::TargetType
Michalis Spyrou96f67692018-09-13 11:39:28 +01001715 << " Data Type: " << input->info()->data_type()
1716 << " Input shape: " << input->info()->tensor_shape()
1717 << " Output shape: " << output->info()->tensor_shape()
1718 << " Activation function: " << act_info.activation()
1719 << " Num classes: " << num_classes
1720 << std::endl);
1721
Georgios Pinitas0b192e82020-02-20 17:09:28 +00001722 return RETURN_UNIQUE_PTR(func);
Michalis Spyrou96f67692018-09-13 11:39:28 +01001723}
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001724} // namespace detail
1725} // namespace backends
1726} // namespace graph
1727} // namespace arm_compute
1728
Michalis Spyrouf4643372019-11-29 16:17:13 +00001729#endif /* ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H */