blob: bf458ae33fdf7f4098c9fd5c7e60d223612b0e87 [file] [log] [blame]
Georgios Pinitasda2491f2018-06-01 17:49:09 +01001/*
2 * Copyright (c) 2018 ARM Limited.
3 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#ifndef __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
25#define __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__
26
27#include "arm_compute/graph/Logger.h"
28#include "arm_compute/graph/Tensor.h"
29#include "arm_compute/graph/TypePrinter.h"
30#include "arm_compute/graph/Types.h"
31#include "arm_compute/graph/backends/Utils.h"
32#include "arm_compute/graph/nodes/Nodes.h"
33
34#include "arm_compute/core/Error.h"
35#include "arm_compute/core/Helpers.h"
36#include "arm_compute/core/ITensorInfo.h"
37#include "arm_compute/core/utils/misc/Cast.h"
38
39namespace arm_compute
40{
41namespace graph
42{
43namespace backends
44{
45namespace detail
46{
47/** Returns backing tensor of a given tensor
48 *
49 * @tparam TargetInfo Target information
50 *
51 * @param[in] tensor Tensor to extract the backing tensor from
52 *
53 * @return Backing tensor if present else nullptr
54 */
55template <typename TargetInfo>
56typename TargetInfo::TensorType *get_backing_tensor(arm_compute::graph::Tensor *tensor)
57{
58 typename TargetInfo::TensorType *backing_tensor = nullptr;
59 if(tensor != nullptr)
60 {
61 ARM_COMPUTE_ERROR_ON(tensor->desc().target != TargetInfo::TargetType);
62 // Get backing tensor handle
63 ITensorHandle *tensor_handle = tensor->handle();
64 // Get backing tensor
65 backing_tensor = (tensor_handle != nullptr) ? arm_compute::utils::cast::polymorphic_cast<typename TargetInfo::TensorType *>(&tensor_handle->tensor()) : nullptr;
66 }
67
68 return backing_tensor;
69}
70
71template <typename TargetInfo>
72void validate_node(const INode &node, size_t num_expected_inputs, size_t num_expected_outputs)
73{
74 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating " << node.type()
75 << " Target : " << TargetInfo::TargetType
76 << " ID : " << node.id()
77 << " Name: " << node.name()
78 << std::endl);
79
80 ARM_COMPUTE_ERROR_ON(TargetInfo::TargetType != node.assigned_target());
81 ARM_COMPUTE_ERROR_ON(node.num_inputs() != num_expected_inputs);
82 ARM_COMPUTE_ERROR_ON(node.num_outputs() != num_expected_outputs);
83}
84
85/** Creates a backend activation layer function
86 *
87 * @tparam ActivationLayerFunction Backend activation function
88 * @tparam TargetInfo Target-specific information
89 *
90 * @param[in] node Node to create the backend function for
91 *
92 * @return Backend activation layer function
93 */
94template <typename ActivationLayerFunction, typename TargetInfo>
95std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
96{
97 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
98
99 // Extract IO and info
100 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
101 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
102 const ActivationLayerInfo act_info = node.activation_info();
103
104 // Create function
105 auto func = support::cpp14::make_unique<ActivationLayerFunction>();
106 func->configure(input, output, act_info);
107
108 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
109 << " Target " << TargetInfo::TargetType
110 << " Data Type: " << input->info()->data_type()
111 << " Shape: " << input->info()->tensor_shape()
112 << " Activation function: " << act_info.activation()
113 << " a: " << act_info.a()
114 << " b: " << act_info.b()
115 << " InPlace : " << is_in_place_operation(input, output)
116 << std::endl);
117
118 return std::move(func);
119}
120
121/** Create a backend batch normalization layer function
122 *
123 * @tparam BatchNormalizationLayerFunction Backend batch normalization function
124 * @tparam TargetInfo Target-specific information
125 *
126 * @param[in] node Node to create the backend function for
127 *
128 * @return Backend batch normalization layer function
129 */
130template <typename BatchNormalizationLayerFunction, typename TargetInfo>
131std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
132{
133 validate_node<TargetInfo>(node, 5 /* expected inputs */, 1 /* expected outputs */);
134
135 // Extract IO and info
136 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
137 typename TargetInfo::TensorType *mean = get_backing_tensor<TargetInfo>(node.input(1));
138 typename TargetInfo::TensorType *var = get_backing_tensor<TargetInfo>(node.input(2));
139 typename TargetInfo::TensorType *beta = get_backing_tensor<TargetInfo>(node.input(3));
140 typename TargetInfo::TensorType *gamma = get_backing_tensor<TargetInfo>(node.input(4));
141 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
142 const float epsilon = node.epsilon();
143 const ActivationLayerInfo fused_act = node.fused_activation();
144
145 // Create and configure function
146 auto func = support::cpp14::make_unique<BatchNormalizationLayerFunction>();
147 func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);
148
149 // Log info
150 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
151 << " Target " << TargetInfo::TargetType
152 << " Data Type: " << input->info()->data_type()
153 << " Shape: " << input->info()->tensor_shape()
154 << " Epsilon: " << epsilon << " "
155 << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
156 << " InPlace : " << is_in_place_operation(input, output)
157 << std::endl);
158
159 return std::move(func);
160}
161
162/** Create a backend channel shuffle layer function
163 *
164 * @tparam ChannelShuffleLayerFunction Backend channel shuffle function
165 * @tparam TargetInfo Target-specific information
166 *
167 * @param[in] node Node to create the backend function for
168 *
169 * @return Backend channel shuffle layer function
170 */
171template <typename ChannelShuffleLayerFunction, typename TargetInfo>
172std::unique_ptr<IFunction> create_channel_shuffle_layer(ChannelShuffleLayerNode &node)
173{
174 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
175
176 // Extract IO and info
177 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
178 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
179 const unsigned int num_groups = node.num_groups();
180
181 // Create function
182 auto func = support::cpp14::make_unique<ChannelShuffleLayerFunction>();
183 func->configure(input, output, num_groups);
184
185 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
186 << " Target " << TargetInfo::TargetType
187 << " Data Type: " << input->info()->data_type()
188 << " Shape: " << input->info()->tensor_shape()
189 << " Num groups: " << num_groups
190 << std::endl);
191
192 return std::move(func);
193}
194
Georgios Pinitase2220552018-07-20 13:23:44 +0100195/** Create a backend layer concatenate function
196 *
197 * @tparam ConcatenateLayerFunction Backend concatenate function
198 * @tparam TargetInfo Target-specific information
199 *
200 * @param[in] node Node to create the backend function for
201 *
202 * @return Backend concatenate layer function
203 */
204template <typename ConcatenateLayerFunction, typename TargetInfo>
205std::unique_ptr<arm_compute::IFunction> create_concatenate_layer(ConcatenateLayerNode &node)
206{
207 ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating Concatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
208 ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);
209
210 // Return nullptr if depth concatenate is switched off
211 if(!node.is_enabled())
212 {
213 return nullptr;
214 }
215
216 // Extract IO and info
217 std::vector<typename TargetInfo::TensorType *> inputs;
218 for(unsigned int i = 0; i < node.num_inputs(); ++i)
219 {
220 inputs.push_back(get_backing_tensor<TargetInfo>(node.input(i)));
221 }
222 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
223 const DataLayoutDimension concat_axis = node.concatenation_axis();
224
225 // Create and configure function
226 auto func = support::cpp14::make_unique<ConcatenateLayerFunction>();
227 func->configure(inputs, output, concat_axis);
228
229 // Log info
230 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
231 << " Target " << TargetInfo::TargetType
232 << " Data Type: " << output->info()->data_type()
233 << " Shape: " << output->info()->tensor_shape()
234 << " Num Inputs: " << inputs.size()
235 << " Axis: " << concat_axis
236 << std::endl);
237
238 return std::move(func);
239}
240
/** Create a backend convolution layer function
 *
 * Dispatches on the node's convolution method (Winograd / Direct / GEMM /
 * generic fallback) and configures the matching backend function.
 *
 * @tparam ConvolutionLayerFunctions Backend convolution functions
 * @tparam TargetInfo                Target-specific information
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context (provides the target's memory manager)
 *
 * @return Backend convolution layer function
 */
template <typename ConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Retypes the bias tensor info in place to S32 before configure();
        // presumably the quantized backend kernels accumulate biases in S32 —
        // NOTE(review): no null-check on biases here, unlike input/output in
        // sibling helpers; confirm conv nodes always carry a bias tensor.
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo     conv_info      = node.convolution_info();
    const unsigned int      num_groups     = node.num_groups();
    const ConvolutionMethod conv_algorithm = node.convolution_method();
    const bool              fast_math      = node.fast_math_hint() == FastMathHint::Enabled;

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
    std::unique_ptr<IFunction>      func;
    std::string                     func_name;

    if(conv_algorithm == ConvolutionMethod::Winograd)
    {
        // Winograd and Direct paths reject grouped convolution outright
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "WinogradConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::WinogradConvolutionLayer>(
                                        std::string("WinogradConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info, ActivationLayerInfo(), fast_math);
    }
    else if(conv_algorithm == ConvolutionMethod::Direct)
    {
        ARM_COMPUTE_ERROR_ON_MSG(num_groups != 1, "DirectConvolutionLayer does not support grouping!");
        std::tie(func, func_name) = create_named_function<typename ConvolutionLayerFunctions::DirectConvolutionLayer>(
                                        std::string("DirectConvolutionLayer"),
                                        input, weights, biases, output, conv_info);
    }
    else if(conv_algorithm == ConvolutionMethod::GEMM)
    {
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GEMMConvolutionLayer>(
                                        std::string("GEMMConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), num_groups);
    }
    else
    {
        // Fallback: let the generic layer pick an implementation (honours fast_math)
        std::tie(func, func_name) = create_named_memory_managed_function<typename ConvolutionLayerFunctions::GenericConvolutionLayer>(
                                        std::string("GenericConvolutionLayer"), mm,
                                        input, weights, biases, output, conv_info,
                                        WeightsInfo(), Size2D(1U, 1U), ActivationLayerInfo(), fast_math, num_groups);
    }

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << " Groups: " << num_groups
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}
327
328/** Create a backend deconvolution layer function
329 *
330 * @tparam DeconvolutionLayerFunction Backend deconvolution function
331 * @tparam TargetInfo Target-specific information
332 *
333 * @param[in] node Node to create the backend function for
334 * @param[in] ctx Graph context
335 *
336 * @return Backend deconvolution layer function
337 */
338template <typename DeconvolutionLayerFunction, typename TargetInfo>
339std::unique_ptr<IFunction> create_deconvolution_layer(DeconvolutionLayerNode &node, GraphContext &ctx)
340{
341 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
342
343 // Extract IO and info
344 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
345 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
346 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
347 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
348
349 const PadStrideInfo deconv_info = node.deconvolution_info();
350 const Size2D inner_border = node.inner_border();
351
352 // Create and configure function (we assume that functions have been validated before creation)
353 std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, TargetInfo::TargetType);
354 std::unique_ptr<IFunction> func;
355
356 std::tie(func, std::ignore) = create_named_memory_managed_function<DeconvolutionLayerFunction>(
357 std::string(), mm,
358 input, weights, biases, output, deconv_info, inner_border.x(), inner_border.y());
359
360 // Log info
361 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
362 << " Target " << TargetInfo::TargetType
363 << " Data Type: " << input->info()->data_type()
364 << " Input shape: " << input->info()->tensor_shape()
365 << " Weights shape: " << weights->info()->tensor_shape()
366 << " Output shape: " << output->info()->tensor_shape()
367 << std::endl);
368 return func;
369}
370
/** Create a backend layer depth-wise convolution function
 *
 * Dispatches on the node's depthwise convolution method (optimized 3x3 vs
 * generic) and configures the matching backend function.
 *
 * @tparam DepthwiseConvolutionLayerFunctions Backend depthwise convolution function
 * @tparam TargetInfo                         Target-specific information
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
template <typename DepthwiseConvolutionLayerFunctions, typename TargetInfo>
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);

    // Extract IO and info
    typename TargetInfo::TensorType *input   = get_backing_tensor<TargetInfo>(node.input(0));
    typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
    typename TargetInfo::TensorType *biases  = get_backing_tensor<TargetInfo>(node.input(2));
    typename TargetInfo::TensorType *output  = get_backing_tensor<TargetInfo>(node.output(0));

    const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());

    if(is_quantized)
    {
        // Retypes the bias tensor info in place to S32 before configure();
        // presumably the quantized backend kernels accumulate biases in S32 —
        // NOTE(review): biases is not null-checked before dereference; confirm
        // depthwise nodes always carry a bias tensor.
        biases->info()->set_data_type(DataType::S32);
    }

    const PadStrideInfo              conv_info     = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string                func_name;
    if(dwc_algorithm == DepthwiseConvolutionMethod::Optimized3x3)
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::DepthwiseConvolutionLayer3x3>(
                                        std::string("DepthwiseConvolutionLayer3x3"),
                                        input, weights, biases, output, conv_info);
    }
    else
    {
        std::tie(func, func_name) = create_named_function<typename DepthwiseConvolutionLayerFunctions::GenericDepthwiseConvolutionLayer>(
                                        std::string("DepthwiseConvolutionLayer"),
                                        input, weights, biases, output, conv_info);
    }

    // Log info (quantization details only when the input is quantized)
    std::ostringstream qss;
    if(is_quantized)
    {
        qss << " Input QuantInfo: " << input->info()->quantization_info()
            << " Weights QuantInfo: " << weights->info()->quantization_info()
            << " Output QuantInfo: " << output->info()->quantization_info();
    }
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Target " << TargetInfo::TargetType
                               << " Data Type: " << input->info()->data_type()
                               << qss.str()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}
435
436/** Create a backend element-wise operation layer function
437 *
438 * @tparam EltwiseFunctions Backend element-wise function
439 * @tparam TargetInfo Target-specific information
440 *
441 * @param[in] node Node to create the backend function for
442 *
443 * @return Backend element-wise operation layer function
444 */
445template <typename EltwiseFunctions, typename TargetInfo>
446std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
447{
448 validate_node<TargetInfo>(node, 2 /* expected inputs */, 1 /* expected outputs */);
449
450 // Extract IO and info
451 typename TargetInfo::TensorType *input1 = get_backing_tensor<TargetInfo>(node.input(0));
452 typename TargetInfo::TensorType *input2 = get_backing_tensor<TargetInfo>(node.input(1));
453 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
454 const EltwiseOperation eltwise_op = node.eltwise_operation();
455 const ConvertPolicy convert_policy = node.convert_policy();
456 ARM_COMPUTE_ERROR_ON(input1 == nullptr);
457 ARM_COMPUTE_ERROR_ON(input2 == nullptr);
458 ARM_COMPUTE_ERROR_ON(output == nullptr);
459
460 std::unique_ptr<IFunction> func = nullptr;
461 std::string func_name;
Georgios Pinitase2220552018-07-20 13:23:44 +0100462 if(eltwise_op == EltwiseOperation::Add)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100463 {
464 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Addition>(
465 std::string("ArithmeticAddition"),
466 input1, input2, output, convert_policy);
467 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100468 else if(eltwise_op == EltwiseOperation::Sub)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100469 {
470 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Subtraction>(
471 std::string("ArithmeticSubtraction"),
472 input1, input2, output, convert_policy);
473 }
Georgios Pinitase2220552018-07-20 13:23:44 +0100474 else if(eltwise_op == EltwiseOperation::Mul)
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100475 {
476 std::tie(func, func_name) = create_named_function<typename EltwiseFunctions::Multiplication>(
477 std::string("PixelWiseMultiplication"),
478 input1, input2, output, 1.f, convert_policy, node.rounding_policy());
479 }
480 else
481 {
482 ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
483 }
484
485 // Log info
486 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
487 << " Target " << TargetInfo::TargetType
488 << " Operation " << func_name
489 << " Data Type: " << input1->info()->data_type()
490 << " Shape : " << input1->info()->tensor_shape()
491 << std::endl);
492
493 return func;
494}
495
496/** Create a backend flatten layer function
497 *
498 * @tparam FlattenLayerFunction Backend flatten function
499 * @tparam TargetInfo Target-specific information
500 *
501 * @param[in] node Node to create the backend function for
502 *
503 * @return Backend flatten layer function
504 */
505template <typename FlattenLayerFunction, typename TargetInfo>
506std::unique_ptr<IFunction> create_flatten_layer(FlattenLayerNode &node)
507{
508 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
509
510 // Extract IO and info
511 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
512 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
513
Georgios Pinitase2220552018-07-20 13:23:44 +0100514 ARM_COMPUTE_ERROR_ON(input == nullptr);
515 ARM_COMPUTE_ERROR_ON(output == nullptr);
516
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100517 // Create and configure function
518 auto func = support::cpp14::make_unique<FlattenLayerFunction>();
519 func->configure(input, output);
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100520
521 // Log info
522 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
523 << " Target " << TargetInfo::TargetType
524 << " Data Type: " << input->info()->data_type()
525 << " Input shape: " << input->info()->tensor_shape()
526 << " Output shape: " << output->info()->tensor_shape()
527 << std::endl);
528
529 return std::move(func);
530}
531
532/** Create a backend fully connected layer function
533 *
534 * @tparam FullyConnectedLayerFunction Backend fully-connected function
535 * @tparam TargetInfo Target-specific information
536 *
537 * @param[in] node Node to create the backend function for
538 * @param[in] ctx Graph context
539 *
540 * @return Backend fully connected layer function
541 */
542template <typename FullyConnectedLayerFunction, typename TargetInfo>
543std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
544{
545 validate_node<TargetInfo>(node, 3 /* expected inputs */, 1 /* expected outputs */);
546
547 // Extract IO and info
548 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
549 typename TargetInfo::TensorType *weights = get_backing_tensor<TargetInfo>(node.input(1));
550 typename TargetInfo::TensorType *biases = get_backing_tensor<TargetInfo>(node.input(2));
551 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
Georgios Pinitas7d66a8e2018-07-17 12:28:42 +0100552 const FullyConnectedLayerInfo fc_info = node.info();
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100553
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100554 ARM_COMPUTE_ERROR_ON(input == nullptr);
555 ARM_COMPUTE_ERROR_ON(weights == nullptr);
556 ARM_COMPUTE_ERROR_ON(output == nullptr);
557
Georgios Pinitase2220552018-07-20 13:23:44 +0100558 // Create and configure function
559 auto func = support::cpp14::make_unique<FullyConnectedLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
560 func->configure(input, weights, biases, output, fc_info);
561
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100562 const bool is_quantized = is_data_type_quantized_asymmetric(input->info()->data_type());
563
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100564 // Log info
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100565 std::ostringstream qss;
566 if(is_quantized)
567 {
568 qss << " Input QuantInfo: " << input->info()->quantization_info()
569 << " Weights QuantInfo: " << weights->info()->quantization_info()
570 << " Output QuantInfo: " << output->info()->quantization_info();
571 }
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100572 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
573 << " Target " << TargetInfo::TargetType
574 << " Data Type: " << input->info()->data_type()
Georgios Pinitasfd7e8532018-09-07 10:51:27 +0100575 << qss.str()
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100576 << " Input shape: " << input->info()->tensor_shape()
577 << " Weights shape: " << weights->info()->tensor_shape()
578 << " Output shape: " << output->info()->tensor_shape()
579 << std::endl);
580
581 return std::move(func);
582}
583
584/** Create a backend normalization layer function
585 *
586 * @tparam NormalizationLayerFunction Backend normalization function
587 * @tparam TargetInfo Target-specific information
588 *
589 * @param[in] node Node to create the backend function for
590 * @param[in] ctx Graph context
591 *
592 * @return Backend normalization layer function
593 */
594template <typename NormalizationLayerFunction, typename TargetInfo>
595std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node, GraphContext &ctx)
596{
597 ARM_COMPUTE_UNUSED(ctx);
598
599 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
600
601 // Extract IO and info
602 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
603 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
604 const NormalizationLayerInfo norm_info = node.normalization_info();
605 ARM_COMPUTE_ERROR_ON(input == nullptr);
606 ARM_COMPUTE_ERROR_ON(output == nullptr);
607
608 // Create and configure function
609 auto func = support::cpp14::make_unique<NormalizationLayerFunction>();
610 func->configure(input, output, norm_info);
611
612 // Log info
613 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
614 << " Target " << TargetInfo::TargetType
615 << " Data Type: " << input->info()->data_type()
616 << " Input shape: " << input->info()->tensor_shape()
617 << " Output shape: " << output->info()->tensor_shape()
618 << " Normalization info: " << norm_info.type()
619 << std::endl);
620
621 return std::move(func);
622}
623
Georgios Pinitas57c48242018-08-02 13:41:49 +0100624/** Create a backend permute layer function
625 *
626 * @tparam PermuteLayerFunction Backend permute function
627 * @tparam TargetInfo Target-specific information
628 *
629 * @param[in] node Node to create the backend function for
630 *
631 * @return Backend permute layer function
632 */
633template <typename PermuteLayerFunction, typename TargetInfo>
634std::unique_ptr<IFunction> create_permute_layer(PermuteLayerNode &node)
635{
636 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
637
638 // Extract IO and info
639 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
640 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
641 const PermutationVector &perm = node.permutation_vector();
642 ARM_COMPUTE_ERROR_ON(input == nullptr);
643 ARM_COMPUTE_ERROR_ON(output == nullptr);
644
645 // Create and configure function
646 auto func = support::cpp14::make_unique<PermuteLayerFunction>();
647 func->configure(input, output, perm);
648
649 // Log info
650 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
651 << " Target " << TargetInfo::TargetType
652 << " Data Type: " << input->info()->data_type()
653 << " Input shape: " << input->info()->tensor_shape()
654 << " Output shape: " << output->info()->tensor_shape()
655 << " Permutation vector: " << perm
656 << std::endl);
657
658 return std::move(func);
659}
660
Georgios Pinitasda2491f2018-06-01 17:49:09 +0100661/** Create a backend pooling layer function
662 *
663 * @tparam PoolingLayerFunction Backend pooling function
664 * @tparam TargetInfo Target-specific information
665 *
666 * @param[in] node Node to create the backend function for
667 *
668 * @return Backend pooling layer function
669 */
670template <typename PoolingLayerFunction, typename TargetInfo>
671std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
672{
673 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
674
675 // Extract IO and info
676 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
677 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
678 const PoolingLayerInfo pool_info = node.pooling_info();
679 ARM_COMPUTE_ERROR_ON(input == nullptr);
680 ARM_COMPUTE_ERROR_ON(output == nullptr);
681
682 // Create and configure function
683 auto func = support::cpp14::make_unique<PoolingLayerFunction>();
684 func->configure(input, output, pool_info);
685
686 // Log info
687 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
688 << " Target " << TargetInfo::TargetType
689 << " Data Type: " << input->info()->data_type()
690 << " Input shape: " << input->info()->tensor_shape()
691 << " Output shape: " << output->info()->tensor_shape()
692 << " Pooling info: " << pool_info.pool_type()
693 << std::endl);
694
695 return std::move(func);
696}
697
698/** Create a backend reshape layer function
699 *
700 * @tparam ReshapeLayerFunction Backend reshape function
701 * @tparam TargetInfo Target-specific information
702 *
703 * @param[in] node Node to create the backend function for
704 *
705 * @return Backend reshape layer function
706 */
707template <typename ReshapeLayerFunction, typename TargetInfo>
708std::unique_ptr<IFunction> create_reshape_layer(ReshapeLayerNode &node)
709{
710 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
711
712 // Extract IO and info
713 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
714 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
715 ARM_COMPUTE_ERROR_ON(input == nullptr);
716 ARM_COMPUTE_ERROR_ON(output == nullptr);
717
718 // Create and configure function
719 auto func = support::cpp14::make_unique<ReshapeLayerFunction>();
720 func->configure(input, output);
721
722 // Log info
723 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
724 << " Target " << TargetInfo::TargetType
725 << " Data Type: " << input->info()->data_type()
726 << " Input shape: " << input->info()->tensor_shape()
727 << " Output shape: " << output->info()->tensor_shape()
728 << std::endl);
729
730 return std::move(func);
731}
732
733/** Create a backend resize layer function
734 *
735 * @tparam ResizeLayerFunction Backend resize function
736 * @tparam TargetInfo Target-specific information
737 *
738 * @param[in] node Node to create the backend function for
739 *
740 * @return Backend resize layer function
741 */
742template <typename ResizeLayerFunction, typename TargetInfo>
743std::unique_ptr<IFunction> create_resize_layer(ResizeLayerNode &node)
744{
745 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
746
747 // Extract IO and info
748 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
749 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
750 ARM_COMPUTE_ERROR_ON(input == nullptr);
751 ARM_COMPUTE_ERROR_ON(output == nullptr);
752 const InterpolationPolicy policy = node.policy();
753
754 // Create and configure function
755 auto func = support::cpp14::make_unique<ResizeLayerFunction>();
756 func->configure(input, output, policy, BorderMode::CONSTANT);
757
758 // Log info
759 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
760 << " Target " << TargetInfo::TargetType
761 << " Data Type: " << input->info()->data_type()
762 << " Input shape: " << input->info()->tensor_shape()
763 << " Output shape: " << output->info()->tensor_shape()
764 << " Interpolation: " << policy
765 << std::endl);
766
767 return std::move(func);
768}
769
770/** Create a backend softmax layer function
771 *
772 * @tparam SoftmaxLayerFunction Backend softmax function
773 * @tparam TargetInfo Target-specific information
774 *
775 * @param[in] node Node to create the backend function for
776 * @param[in] ctx Graph context
777 *
778 * @return Backend softmax layer function
779 */
780template <typename SoftmaxLayerFunction, typename TargetInfo>
781std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
782{
783 validate_node<TargetInfo>(node, 1 /* expected inputs */, 1 /* expected outputs */);
784
785 // Extract IO and info
786 typename TargetInfo::TensorType *input = get_backing_tensor<TargetInfo>(node.input(0));
787 typename TargetInfo::TensorType *output = get_backing_tensor<TargetInfo>(node.output(0));
788 const float beta = node.beta();
789 ARM_COMPUTE_ERROR_ON(input == nullptr);
790 ARM_COMPUTE_ERROR_ON(output == nullptr);
791
792 // Create and configure function
793 auto func = support::cpp14::make_unique<SoftmaxLayerFunction>(get_memory_manager(ctx, TargetInfo::TargetType));
794 func->configure(input, output, beta);
795
796 // Log info
797 ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << node.type()
798 << " Target " << TargetInfo::TargetType
799 << " Data Type: " << input->info()->data_type()
800 << " Input shape: " << input->info()->tensor_shape()
801 << " Output shape: " << output->info()->tensor_shape()
802 << std::endl);
803
804 return std::move(func);
805}
806} // namespace detail
807} // namespace backends
808} // namespace graph
809} // namespace arm_compute
810
811#endif /* __ARM_COMPUTE_GRAPH_BACKENDS_DETAIL_FUNCTION_HELPERS_H__ */