/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/graph/IOperation.h"
#include "arm_compute/graph/NodeContext.h"
#include "arm_compute/graph/OperationRegistrar.h"
#include "arm_compute/graph/Types.h"
#include "arm_compute/runtime/CL/CLFunctions.h"
#include "support/ToolchainSupport.h"
#include "utils/GraphTypePrinter.h"
#include "utils/TypePrinter.h"

#include <memory>

using namespace arm_compute::graph;
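
/* Overview comment (summary of the code below): every REGISTER_SIMPLE_OPERATION
 * block in this file registers the OpenCL implementation of one graph operation
 * and follows the same four steps:
 *   1. Validate the NodeContext (input/output counts, tensors are ICLTensors).
 *   2. Extract the I/O tensors and any operation parameters from the context.
 *   3. Create the matching CL runtime function and configure it.
 *   4. Log the configuration and return the configured function for the graph to run.
 */
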
/* Activation Layer */
REGISTER_SIMPLE_OPERATION(CLActivationLayerOperation, OPENCL, OperationType::ActivationLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto      *in       = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto      *out      = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    const auto act_info = ctx.parameter<ActivationLayerInfo>("ActivationLayerInfo");

    // Create and configure function
    auto activation = arm_compute::support::cpp14::make_unique<arm_compute::CLActivationLayer>();
    activation->configure(in, out, act_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLActivationLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " Activation function: " << act_info.activation()
                               << " a: " << act_info.a()
                               << " b: " << act_info.b()
                               << std::endl);

    return std::move(activation);
}

/* Batch Normalization Layer */
REGISTER_SIMPLE_OPERATION(CLBatchNormalizationLayerOperation, OPENCL, OperationType::BatchNormalizationLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 5);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto      *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto      *mean    = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
    auto      *var     = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
    auto      *beta    = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(3));
    auto      *gamma   = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(4));
    auto      *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    const auto epsilon = ctx.parameter<float>("epsilon");

    // Create and configure function
    auto batch_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLBatchNormalizationLayer>();
    batch_norm->configure(in, out, mean, var, beta, gamma, epsilon);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLBatchNormalizationLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " Mean shape: " << mean->info()->tensor_shape()
                               << " Var shape: " << var->info()->tensor_shape()
                               << " Beta shape: " << beta->info()->tensor_shape()
                               << " Gamma shape: " << gamma->info()->tensor_shape()
                               << " Epsilon: " << epsilon
                               << std::endl);

    return std::move(batch_norm);
}

/* DepthConvert Layer */
REGISTER_SIMPLE_OPERATION(CLDepthConvertLayerOperation, OPENCL, OperationType::DepthConvertLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto      *in          = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto      *out         = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    const auto conv_policy = ctx.parameter<ConvertPolicy>("ConvertPolicy");
    const auto shift       = ctx.parameter<uint32_t>("shift");

    // Create and configure function
    auto depthconvert = arm_compute::support::cpp14::make_unique<arm_compute::CLDepthConvert>();
    depthconvert->configure(in, out, conv_policy, shift);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDepthConvertLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " shift: " << shift
                               << std::endl);

    return std::move(depthconvert);
}

/* Dequantization Layer */
REGISTER_SIMPLE_OPERATION(CLDequantizationLayerOperation, OPENCL, OperationType::DequantizationLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 2);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1)) == nullptr);

    // Extract IO and info
    auto *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    auto *min_max = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(1));

    // Create and configure function
    auto dequantization = arm_compute::support::cpp14::make_unique<arm_compute::CLDequantizationLayer>();
    dequantization->configure(in, out, min_max);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLDequantizationLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " Min max shape: " << min_max->info()->tensor_shape()
                               << std::endl);

    return std::move(dequantization);
}

/* Flatten Layer */
REGISTER_SIMPLE_OPERATION(CLFlattenLayerOperation, OPENCL, OperationType::FlattenLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));

    // Create and configure function
    auto flatten = arm_compute::support::cpp14::make_unique<arm_compute::CLFlattenLayer>();
    flatten->configure(in, out);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFlattenLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << std::endl);

    return std::move(flatten);
}

/* Floor Layer */
REGISTER_SIMPLE_OPERATION(CLFloorLayerOperation, OPENCL, OperationType::FloorLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));

    // Create and configure function
    auto floor = arm_compute::support::cpp14::make_unique<arm_compute::CLFloor>();
    floor->configure(in, out);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFloorLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << std::endl);

    return std::move(floor);
}

/* Fully Connected Layer */
REGISTER_SIMPLE_OPERATION(CLFullyConnectedLayer, OPENCL, OperationType::FullyConnectedLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 3);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *weights = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(1));
    auto *biases  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(2));
    auto *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));

    // Create and configure function
    auto fc = arm_compute::support::cpp14::make_unique<arm_compute::CLFullyConnectedLayer>();
    fc->configure(in, weights, biases, out);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLFullyConnectedLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Weights shape: " << weights->info()->tensor_shape()
                               << " Biases Shape: " << biases->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << std::endl);

    return std::move(fc);
}

/* L2 Normalize Layer */
REGISTER_SIMPLE_OPERATION(CLL2NormalizeLayerOperation, OPENCL, OperationType::L2NormalizeLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto      *in      = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto      *out     = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    const auto axis    = ctx.parameter<unsigned int>("axis");
    const auto epsilon = ctx.parameter<float>("epsilon");

    // Create and configure function
    auto l2_norm = arm_compute::support::cpp14::make_unique<arm_compute::CLL2Normalize>();
    l2_norm->configure(in, out, axis, epsilon);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLL2NormalizeLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " Axis: " << axis
                               << " Epsilon: " << epsilon
                               << std::endl);

    return std::move(l2_norm);
}

/* Normalization Layer */
REGISTER_SIMPLE_OPERATION(CLNormalizationLayerOperation, OPENCL, OperationType::NormalizationLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto      *in        = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto      *out       = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    const auto norm_info = ctx.parameter<NormalizationLayerInfo>("NormalizationLayerInfo");

    // Create and configure function
    auto norm = arm_compute::support::cpp14::make_unique<arm_compute::CLNormalizationLayer>();
    norm->configure(in, out, norm_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLNormalizationLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " Normalization info: " << norm_info
                               << std::endl);

    return std::move(norm);
}

/* Pooling Layer */
REGISTER_SIMPLE_OPERATION(CLPoolingLayerOperation, OPENCL, OperationType::PoolingLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto      *in        = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto      *out       = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));
    const auto pool_info = ctx.parameter<PoolingLayerInfo>("PoolingLayerInfo");

    // Create and configure function
    auto pool = arm_compute::support::cpp14::make_unique<arm_compute::CLPoolingLayer>();
    pool->configure(in, out, pool_info);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLPoolingLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << " Pooling info: " << pool_info
                               << std::endl);

    return std::move(pool);
}

/* Quantization Layer */
REGISTER_SIMPLE_OPERATION(CLQuantizationLayerOperation, OPENCL, OperationType::QuantizationLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));

    // Create and configure function
    auto quantization = arm_compute::support::cpp14::make_unique<arm_compute::CLQuantizationLayer>();
    quantization->configure(in, out);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLQuantizationLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << std::endl);

    return std::move(quantization);
}

/* Reshape Layer */
REGISTER_SIMPLE_OPERATION(CLReshapeLayerOperation, OPENCL, OperationType::ReshapeLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));

    // Create and configure function
    auto reshape = arm_compute::support::cpp14::make_unique<arm_compute::CLReshapeLayer>();
    reshape->configure(in, out);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLReshapeLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << std::endl);

    return std::move(reshape);
}

/* Softmax Layer */
REGISTER_SIMPLE_OPERATION(CLSoftmaxLayerOperation, OPENCL, OperationType::SoftmaxLayer)
{
    ARM_COMPUTE_ERROR_ON(ctx.num_inputs() != 1);
    ARM_COMPUTE_ERROR_ON(ctx.num_outputs() != 1);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0)) == nullptr);
    ARM_COMPUTE_ERROR_ON(dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0)) == nullptr);

    // Extract IO and info
    auto *in  = dynamic_cast<arm_compute::ICLTensor *>(ctx.input(0));
    auto *out = dynamic_cast<arm_compute::ICLTensor *>(ctx.output(0));

    // Create and configure function
    auto smx = arm_compute::support::cpp14::make_unique<arm_compute::CLSoftmaxLayer>();
    smx->configure(in, out);

    // Log info
    ARM_COMPUTE_LOG_GRAPH_INFO("Instantiating CLSoftmaxLayer"
                               << " Data Type: " << in->info()->data_type()
                               << " Input shape: " << in->info()->tensor_shape()
                               << " Output shape: " << out->info()->tensor_shape()
                               << std::endl);

    return std::move(smx);
}