/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/graph/backends/GLES/GCFunctionFactory.h"

#include "arm_compute/core/utils/misc/Cast.h"
#include "arm_compute/graph/Graph.h"
#include "arm_compute/graph/GraphContext.h"
#include "arm_compute/graph/Logger.h"
#include "arm_compute/graph/TypePrinter.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/graph/backends/Utils.h"
#include "arm_compute/graph/nodes/Nodes.h"
#include "arm_compute/runtime/GLES_COMPUTE/GCFunctions.h"

#include "support/ToolchainSupport.h"

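// Bring the cast helpers (polymorphic_cast / polymorphic_downcast) used throughout this file into scope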
using namespace arm_compute::utils::cast;

namespace arm_compute
{
namespace graph
{
namespace backends
{
namespace
{
/** Returns backing tensor of a given tensor
 *
 * @param[in] tensor Tensor to extract the backing tensor from
 *
 * @return Backing tensor if present else nullptr
 */
arm_compute::IGCTensor *get_backing_tensor(arm_compute::graph::Tensor *tensor)
{
    arm_compute::IGCTensor *backing_tensor = nullptr;
    if(tensor != nullptr)
    {
        ARM_COMPUTE_ERROR_ON(tensor->desc().target != arm_compute::graph::Target::GC);
        // Get backing tensor handle
        ITensorHandle *tensor_handle = tensor->handle();
        // Get backing tensor
        backing_tensor = (tensor_handle != nullptr) ? polymorphic_cast<IGCTensor *>(&tensor_handle->tensor()) : nullptr;
    }

    return backing_tensor;
}

/** Create a backend activation layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend activation layer function
 */
std::unique_ptr<IFunction> create_activation_layer(ActivationLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC ActivationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const ActivationLayerInfo act_info = node.activation_info();

    // Create function
    auto func = support::cpp14::make_unique<GCActivationLayer>();
    func->configure(input, output, act_info);

    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCActivationLayer"
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << " InPlace : " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}

/** Create a backend batch normalization layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend batch normalization layer function
 */
std::unique_ptr<IFunction> create_batch_normalization_layer(BatchNormalizationLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC BatchNormalization node with ID : " << node.id() << " and Name: " << node.name() << std::endl);

    // TODO (geopin01) : Var and mean are compulsory, switch function to accept nullptr as beta and/or gamma
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 5);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *mean = get_backing_tensor(node.input(1));
    IGCTensor *var = get_backing_tensor(node.input(2));
    IGCTensor *beta = get_backing_tensor(node.input(3));
    IGCTensor *gamma = get_backing_tensor(node.input(4));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const float epsilon = node.epsilon();
    const ActivationLayerInfo fused_act = node.fused_activation();

    // Create and configure function
    auto func = support::cpp14::make_unique<GCBatchNormalizationLayer>();
    func->configure(input, output, mean, var, beta, gamma, epsilon, fused_act);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCBatchNormalizationLayer"
                               << " Data Type: " << input->info()->data_type()
                               << " Shape: " << input->info()->tensor_shape()
                               << " Epsilon: " << epsilon << " "
                               << (fused_act.enabled() ? to_string(fused_act.activation()) : "")
                               << " InPlace : " << is_in_place_operation(input, output)
                               << std::endl);

    return std::move(func);
}

/** Create a backend convolution layer function
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend convolution layer function
 */
std::unique_ptr<IFunction> create_convolution_layer(ConvolutionLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC ConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *weights = get_backing_tensor(node.input(1));
    IGCTensor *biases = get_backing_tensor(node.input(2));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const PadStrideInfo conv_info = node.convolution_info();
    const ConvolutionMethod conv_algorithm = node.convolution_method();

    // Create and configure function (we assume that functions have been validated before creation)
    std::shared_ptr<IMemoryManager> mm = get_memory_manager(ctx, Target::GC);
    std::unique_ptr<IFunction> func;
    std::string func_name;

    if(conv_algorithm == ConvolutionMethod::DIRECT)
    {
        std::tie(func, func_name) = create_named_function<GCDirectConvolutionLayer>(
            std::string("GCDirectConvolutionLayer"), input, weights, biases, output, conv_info);
    }
    else
    {
        std::tie(func, func_name) = create_named_memory_managed_function<GCConvolutionLayer>(
            std::string("GCConvolutionLayer"), mm, input, weights, biases, output, conv_info);
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend depth concatenate layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth concatenate layer function
 */
std::unique_ptr<arm_compute::IFunction> create_depth_concatenate_layer(DepthConcatenateLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC DepthConcatenate node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Return nullptr if depth concatenate is switched off
    if(!node.is_enabled())
    {
        return nullptr;
    }

    // Extract IO and info
    std::vector<arm_compute::IGCTensor *> inputs;
    for(unsigned int i = 0; i < node.num_inputs(); ++i)
    {
        inputs.push_back(get_backing_tensor(node.input(i)));
    }
    IGCTensor *output = get_backing_tensor(node.output(0));

    // Create and configure function
    auto func = support::cpp14::make_unique<GCDepthConcatenateLayer>();
    func->configure(inputs, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCDepthConcatenateLayer"
                               << " Data Type: " << output->info()->data_type()
                               << " Shape: " << output->info()->tensor_shape()
                               << " Num Inputs: " << inputs.size()
                               << std::endl);

    return std::move(func);
}

/** Create a backend depth-wise convolution layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend depth-wise convolution layer function
 */
std::unique_ptr<IFunction> create_depthwise_convolution_layer(DepthwiseConvolutionLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC DepthwiseConvolutionLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *weights = get_backing_tensor(node.input(1));
    IGCTensor *biases = get_backing_tensor(node.input(2));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const PadStrideInfo conv_info = node.convolution_info();
    const DepthwiseConvolutionMethod dwc_algorithm = node.depthwise_convolution_method();

    // Create and configure function (we assume that functions have been validated before creation)
    std::unique_ptr<IFunction> func;
    std::string func_name;
    if(dwc_algorithm == DepthwiseConvolutionMethod::OPTIMIZED_3x3)
    {
        std::tie(func, func_name) = create_named_function<GCDepthwiseConvolutionLayer3x3>(
            std::string("GCDepthwiseConvolutionLayer3x3"), input, weights, biases, output, conv_info);
    }
    else
    {
        ARM_COMPUTE_ERROR("Generic DepthwiseConvolutionLayer is not supported in GLES backend");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);
    return func;
}

/** Create a backend element-wise operation layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend element-wise operation layer function
 */
std::unique_ptr<IFunction> create_eltwise_layer(EltwiseLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC EltwiseLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 2);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input1 = get_backing_tensor(node.input(0));
    IGCTensor *input2 = get_backing_tensor(node.input(1));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const EltwiseOperation eltwise_op = node.eltwise_operation();
    ARM_COMPUTE_ERROR_ON(input1 == nullptr);
    ARM_COMPUTE_ERROR_ON(input2 == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    std::unique_ptr<IFunction> func = nullptr;
    std::string func_name;
    if(eltwise_op == EltwiseOperation::ADD)
    {
        std::tie(func, func_name) = create_named_function<GCArithmeticAddition>(
            std::string("GCArithmeticAddition"), input1, input2, output, ConvertPolicy::SATURATE);
    }
    else if(eltwise_op == EltwiseOperation::SUB)
    {
        ARM_COMPUTE_ERROR("Arithmetic subtraction is not supported in GLES backend");
    }
    else if(eltwise_op == EltwiseOperation::MUL)
    {
        std::tie(func, func_name) = create_named_function<GCPixelWiseMultiplication>(
            std::string("GCPixelWiseMultiplication"), input1, input2, output, 1.f);
    }
    else
    {
        ARM_COMPUTE_ERROR("Unsupported element-wise operation!");
    }

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated " << func_name
                               << " Data Type: " << input1->info()->data_type()
                               << " Shape : " << input1->info()->tensor_shape()
                               << std::endl);

    return func;
}

/** Create a backend fully connected layer function
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend fully connected layer function
 */
std::unique_ptr<IFunction> create_fully_connected_layer(FullyConnectedLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC FullyConnectedLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 3);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *weights = get_backing_tensor(node.input(1));
    IGCTensor *biases = get_backing_tensor(node.input(2));
    IGCTensor *output = get_backing_tensor(node.output(0));

    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(weights == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GCFullyConnectedLayer>(get_memory_manager(ctx, Target::GC));
    func->configure(input, weights, biases, output);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCFullyConnectedLayer"
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Biases Shape: " << biases->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}

/** Create a backend normalization layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend normalization layer function
 */
std::unique_ptr<IFunction> create_normalization_layer(NormalizationLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC NormalizationLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const NormalizationLayerInfo norm_info = node.normalization_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GCNormalizationLayer>();
    func->configure(input, output, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCNormalizationLayer"
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Normalization info: " << norm_info.type()
                               << std::endl);

    return std::move(func);
}

/** Create a backend pooling layer function
 *
 * @param[in] node Node to create the backend function for
 *
 * @return Backend pooling layer function
 */
std::unique_ptr<IFunction> create_pooling_layer(PoolingLayerNode &node)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC PoolingLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const PoolingLayerInfo pool_info = node.pooling_info();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function
    auto func = support::cpp14::make_unique<GCPoolingLayer>();
    func->configure(input, output, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCPoolingLayer"
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << " Pooling info: " << pool_info.pool_type()
                               << std::endl);

    return std::move(func);
}

/** Create a backend softmax layer function
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend softmax layer function
 */
std::unique_ptr<IFunction> create_softmax_layer(SoftmaxLayerNode &node, GraphContext &ctx)
{
    ARM_COMPUTE_LOG_GRAPH_VERBOSE("Creating GC SoftmaxLayer node with ID : " << node.id() << " and Name: " << node.name() << std::endl);
    ARM_COMPUTE_ERROR_ON(node.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(node.num_outputs() != 1);

    // Extract IO and info
    IGCTensor *input = get_backing_tensor(node.input(0));
    IGCTensor *output = get_backing_tensor(node.output(0));
    const float beta = node.beta();
    ARM_COMPUTE_ERROR_ON(input == nullptr);
    ARM_COMPUTE_ERROR_ON(output == nullptr);

    // Create and configure function (use the GC memory manager; this is the GLES backend)
    auto func = support::cpp14::make_unique<GCSoftmaxLayer>(get_memory_manager(ctx, Target::GC));
    func->configure(input, output, beta);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiated GCSoftmaxLayer"
                               << " Data Type: " << input->info()->data_type()
                               << " Input shape: " << input->info()->tensor_shape()
                               << " Output shape: " << output->info()->tensor_shape()
                               << std::endl);

    return std::move(func);
}
} // namespace

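/** Create a GLES Compute backend function for a given node
 *
 * @param[in] node Node to create the backend function for
 * @param[in] ctx  Graph context
 *
 * @return Backend function for the node, or nullptr if the node type is not supported
 *
 * @note Illustrative usage sketch only; in practice the graph backend drives this call
 *       while configuring an already validated graph. 'node' and 'ctx' below are assumed
 *       to come from such a graph and to have been assigned Target::GC.
 *
 * @code
 * std::unique_ptr<IFunction> func = GCFunctionFactory::create(node, ctx);
 * if(func != nullptr)
 * {
 *     func->run(); // execute the configured backend function
 * }
 * @endcode
 */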
std::unique_ptr<IFunction> GCFunctionFactory::create(INode *node, GraphContext &ctx)
{
    if(node == nullptr)
    {
        return nullptr;
    }

    NodeType type = node->type();
    switch(type)
    {
        case NodeType::ActivationLayer:
            return create_activation_layer(*polymorphic_downcast<ActivationLayerNode *>(node));
        case NodeType::BatchNormalizationLayer:
            return create_batch_normalization_layer(*polymorphic_downcast<BatchNormalizationLayerNode *>(node));
        case NodeType::ConvolutionLayer:
            return create_convolution_layer(*polymorphic_downcast<ConvolutionLayerNode *>(node), ctx);
        case NodeType::DepthConcatenateLayer:
            return create_depth_concatenate_layer(*polymorphic_downcast<DepthConcatenateLayerNode *>(node));
        case NodeType::DepthwiseConvolutionLayer:
            return create_depthwise_convolution_layer(*polymorphic_downcast<DepthwiseConvolutionLayerNode *>(node));
        case NodeType::EltwiseLayer:
            return create_eltwise_layer(*polymorphic_downcast<EltwiseLayerNode *>(node));
        case NodeType::FullyConnectedLayer:
            return create_fully_connected_layer(*polymorphic_downcast<FullyConnectedLayerNode *>(node), ctx);
        case NodeType::NormalizationLayer:
            return create_normalization_layer(*polymorphic_downcast<NormalizationLayerNode *>(node));
        case NodeType::PoolingLayer:
            return create_pooling_layer(*polymorphic_downcast<PoolingLayerNode *>(node));
        case NodeType::SoftmaxLayer:
            return create_softmax_layer(*polymorphic_downcast<SoftmaxLayerNode *>(node), ctx);
        default:
            return nullptr;
    }
}
} // namespace backends
} // namespace graph
} // namespace arm_compute