//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include <armnn/BackendHelper.hpp>
Matteo Martincighc601aa62019-10-29 15:03:22 +00007#include <armnn/BackendRegistry.hpp>
Matthew Sloyan81beae32021-07-13 19:46:11 +01008#include <armnn/Logging.hpp>
Matteo Martincighc601aa62019-10-29 15:03:22 +00009
Matteo Martincighe5b8eb92019-11-28 15:45:42 +000010#include <armnn/backends/IBackendInternal.hpp>
Ferran Balaguerf54c9462019-07-10 12:43:58 +010011
12namespace armnn
13{
14
Francis Murtagh7909c532021-01-28 14:25:15 +000015// Return LayerSupportHandle instead of the previous pointer to ILayerSupport.
16LayerSupportHandle GetILayerSupportByBackendId(const armnn::BackendId& backend)
Ferran Balaguerf54c9462019-07-10 12:43:58 +010017{
18 BackendRegistry& backendRegistry = armnn::BackendRegistryInstance();
19
20 if (!backendRegistry.IsBackendRegistered(backend))
21 {
Francis Murtagh7909c532021-01-28 14:25:15 +000022 return LayerSupportHandle(nullptr);
Ferran Balaguerf54c9462019-07-10 12:43:58 +010023 }
24
25 auto factoryFunc = backendRegistry.GetFactory(backend);
26 auto backendObject = factoryFunc();
Sadik Armaganf0a6dec2021-03-25 07:46:55 +000027 return LayerSupportHandle(backendObject->GetLayerSupport(), backend);
28}
29
Finn Williamsb9af86e2021-05-26 18:38:12 +010030Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
31 const BackendCapabilities& capabilities)
32{
33 for (size_t i=0; i < capabilities.GetOptionCount(); i++)
34 {
35 const auto& capability = capabilities.GetOption(i);
36 if (backendCapabilityName == capability.GetName())
37 {
38 return capability;
39 }
40 }
41 return EmptyOptional();
42}
43
44Optional<const BackendOptions::BackendOption> GetCapability(const std::string& backendCapabilityName,
45 const armnn::BackendId& backend)
46{
47 auto const& backendRegistry = armnn::BackendRegistryInstance();
48 if (backendRegistry.IsBackendRegistered(backend))
49 {
50 auto factoryFunc = backendRegistry.GetFactory(backend);
51 auto backendObject = factoryFunc();
52 auto capabilities = backendObject->GetCapabilities();
53 return GetCapability(backendCapabilityName, capabilities);
54 }
55 return EmptyOptional();
56}
57
/// True if a capability with the given name exists in the capability set
/// (value is not inspected).
bool HasCapability(const std::string& name, const BackendCapabilities& capabilities)
{
    return GetCapability(name, capabilities).has_value();
}

/// True if the named capability exists on the (registered) backend.
bool HasCapability(const std::string& name, const armnn::BackendId& backend)
{
    return GetCapability(name, backend).has_value();
}
67
68bool HasCapability(const BackendOptions::BackendOption& capability, const BackendCapabilities& capabilities)
69{
70 for (size_t i=0; i < capabilities.GetOptionCount(); i++)
71 {
72 const auto& backendCapability = capabilities.GetOption(i);
73 if (capability.GetName() == backendCapability.GetName())
74 {
75 if (capability.GetValue().IsBool() && backendCapability.GetValue().IsBool())
76 {
77 return capability.GetValue().AsBool() == backendCapability.GetValue().AsBool();
78 }
79 else if(capability.GetValue().IsFloat() && backendCapability.GetValue().IsFloat())
80 {
81 return capability.GetValue().AsFloat() == backendCapability.GetValue().AsFloat();
82 }
83 else if(capability.GetValue().IsInt() && backendCapability.GetValue().IsInt())
84 {
85 return capability.GetValue().AsInt() == backendCapability.GetValue().AsInt();
86 }
87 else if(capability.GetValue().IsString() && backendCapability.GetValue().IsString())
88 {
89 return capability.GetValue().AsString() == backendCapability.GetValue().AsString();
90 }
91 else if(capability.GetValue().IsUnsignedInt() && backendCapability.GetValue().IsUnsignedInt())
92 {
93 return capability.GetValue().AsUnsignedInt() == backendCapability.GetValue().AsUnsignedInt();
94 }
95 }
96 }
97 return false;
98}
99
100bool HasCapability(const BackendOptions::BackendOption& backendOption, const armnn::BackendId& backend)
101{
102 auto const& backendRegistry = armnn::BackendRegistryInstance();
103 if (backendRegistry.IsBackendRegistered(backend))
104 {
105 auto factoryFunc = backendRegistry.GetFactory(backend);
106 auto backendObject = factoryFunc();
107 auto capabilities = backendObject->GetCapabilities();
108 return HasCapability(backendOption, capabilities);
109 }
110 return false;
111}
112
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000113/// Convenience function to check a capability on a backend
114bool IsCapabilitySupported(const armnn::BackendId& backend, armnn::BackendCapability capability)
115{
116 bool hasCapability = false;
117 auto const& backendRegistry = armnn::BackendRegistryInstance();
118 if (backendRegistry.IsBackendRegistered(backend))
119 {
120 auto factoryFunc = backendRegistry.GetFactory(backend);
121 auto backendObject = factoryFunc();
Finn Williamsb9af86e2021-05-26 18:38:12 +0100122 ARMNN_NO_DEPRECATE_WARN_BEGIN
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000123 hasCapability = backendObject->HasCapability(capability);
Finn Williamsb9af86e2021-05-26 18:38:12 +0100124 ARMNN_NO_DEPRECATE_WARN_END
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000125 }
126 return hasCapability;
Ferran Balaguerf54c9462019-07-10 12:43:58 +0100127}
128
Francis Murtagh7909c532021-01-28 14:25:15 +0000129bool LayerSupportHandle::IsBackendRegistered() const
130{
131 if (m_LayerSupport)
132 {
133 return true;
134 }
135
136 return false;
Ferran Balaguerf54c9462019-07-10 12:43:58 +0100137}
Francis Murtagh7909c532021-01-28 14:25:15 +0000138
139
// ---------------------------------------------------------------------------
// Thin forwarding wrappers: each method below delegates the support query to
// the ILayerSupport object of the backend this handle was created for.
// Most wrappers pass reasonIfUnsupported.value() through; a few
// (DetectionPostProcess, DilatedDepthwiseConvolution) forward the Optional
// itself — presumably intentional upstream, TODO confirm against ILayerSupport.
// ---------------------------------------------------------------------------

bool LayerSupportHandle::IsAbsSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported)
{
    // Call the IsXXXLayerSupport function of the specific backend.
    return m_LayerSupport->IsAbsSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsActivationSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const ActivationDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsActivationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsAdditionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsAdditionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsArgMinMaxSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const ArgMinMaxDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsArgMinMaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& mean,
                                                       const TensorInfo& var,
                                                       const TensorInfo& beta,
                                                       const TensorInfo& gamma,
                                                       const BatchNormalizationDescriptor& descriptor,
                                                       Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchNormalizationSupported(input,
                                                         output,
                                                         mean,
                                                         var,
                                                         beta,
                                                         gamma,
                                                         descriptor,
                                                         reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsBatchToSpaceNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const BatchToSpaceNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsBatchToSpaceNdSupported(input,
                                                     output,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsCastSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsCastSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsChannelShuffleSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const ChannelShuffleDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsChannelShuffleSupported(input,
                                                     output,
                                                     descriptor,
                                                     reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsComparisonSupported(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ComparisonDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsComparisonSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConcatSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConcatSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConstantSupported(const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConstantSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertBf16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertBf16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToBf16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToBf16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp16ToFp32Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp16ToFp32Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvertFp32ToFp16Supported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvertFp32ToFp16Supported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsConvolution2dSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const Convolution2dDescriptor& descriptor,
                                                  const TensorInfo& weights,
                                                  const Optional<TensorInfo>& biases,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsConvolution2dSupported(input,
                                                    output,
                                                    descriptor,
                                                    weights,
                                                    biases,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDebugSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDebugSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthToSpaceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const DepthToSpaceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthToSpaceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDepthwiseConvolutionSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDepthwiseConvolutionSupported(input,
                                                           output,
                                                           descriptor,
                                                           weights,
                                                           biases,
                                                           reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDequantizeSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDequantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsDetectionPostProcessSupported(const TensorInfo& boxEncodings,
                                                         const TensorInfo& scores,
                                                         const TensorInfo& anchors,
                                                         const TensorInfo& detectionBoxes,
                                                         const TensorInfo& detectionClasses,
                                                         const TensorInfo& detectionScores,
                                                         const TensorInfo& numDetections,
                                                         const DetectionPostProcessDescriptor& descriptor,
                                                         Optional<std::string&> reasonIfUnsupported)
{
    // Forwards the Optional itself rather than .value(), unlike most wrappers.
    return m_LayerSupport->IsDetectionPostProcessSupported(boxEncodings,
                                                           scores,
                                                           anchors,
                                                           detectionBoxes,
                                                           detectionClasses,
                                                           detectionScores,
                                                           numDetections,
                                                           descriptor,
                                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsDilatedDepthwiseConvolutionSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const DepthwiseConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    // Forwards the Optional itself rather than .value(), unlike most wrappers.
    return m_LayerSupport->IsDilatedDepthwiseConvolutionSupported(input,
                                                                  output,
                                                                  descriptor,
                                                                  weights,
                                                                  biases,
                                                                  reasonIfUnsupported);
}

bool LayerSupportHandle::IsDivisionSupported(const TensorInfo& input0,
                                             const TensorInfo& input1,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsDivisionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsElementwiseUnarySupported(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const ElementwiseUnaryDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsElementwiseUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsEqualSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsEqualSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFakeQuantizationSupported(const TensorInfo& input,
                                                     const FakeQuantizationDescriptor& descriptor,
                                                     Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFakeQuantizationSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFillSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const FillDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFillSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsFloorSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsFloorSupported(input, output, reasonIfUnsupported.value());
}
405
406bool LayerSupportHandle::IsFullyConnectedSupported(const TensorInfo& input,
407 const TensorInfo& output,
408 const TensorInfo& weights,
409 const TensorInfo& biases,
410 const FullyConnectedDescriptor& descriptor,
411 Optional<std::string&> reasonIfUnsupported)
412{
Matthew Sloyan81beae32021-07-13 19:46:11 +0100413 if(!m_BackendId.IsUndefined())
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000414 {
Matthew Sloyan81beae32021-07-13 19:46:11 +0100415 auto capability = GetCapability("ConstantTensorsAsInputs", m_BackendId);
416 if(!capability.has_value() || capability.value().GetValue().AsBool() == false)
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000417 {
Matthew Sloyan81beae32021-07-13 19:46:11 +0100418 if(!weights.IsConstant())
419 {
Matthew Sloyanb20d1d42021-08-09 15:33:41 +0100420 reasonIfUnsupported.value() =
421 "This backend might not support non constant weights. "
422 "If weights are constant make sure to set IsConstant when creating TensorInfo";
Matthew Sloyan81beae32021-07-13 19:46:11 +0100423 return false;
424 }
425 if(descriptor.m_BiasEnabled)
426 {
427 if(!biases.IsConstant())
428 {
Matthew Sloyanb20d1d42021-08-09 15:33:41 +0100429 reasonIfUnsupported.value() =
430 "This backend might not support non constant bias. "
431 "If bias are constant make sure to set IsConstant when creating TensorInfo";
Matthew Sloyan81beae32021-07-13 19:46:11 +0100432 return false;
433 }
434 }
435
436 // At the first stage we will only print a warning. this is to give
437 // backend developers a chance to adopt and read weights from input slots.
438 ARMNN_LOG(warning) << "The backend makes use of a deprecated interface to read constant tensors. "
439 "If you are a backend developer please find more information in our "
440 "doxygen documentation on github https://github.com/ARM-software/armnn "
441 "under the keyword 'ConstTensorsAsInputs'.";
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000442 }
Matthew Sloyan81beae32021-07-13 19:46:11 +0100443
444 if(!descriptor.m_ConstantWeights)
445 {
446 auto capability = GetCapability("NonConstWeights", m_BackendId);
447 if (capability.has_value() && capability.value().GetValue().AsBool() == true)
448 {
449 return true;
450 }
451 return false;
452 }
Sadik Armaganf0a6dec2021-03-25 07:46:55 +0000453 }
454
Francis Murtagh7909c532021-01-28 14:25:15 +0000455 return m_LayerSupport->IsFullyConnectedSupported(input,
Matthew Sloyan81beae32021-07-13 19:46:11 +0100456 output,
457 weights,
458 biases,
459 descriptor,
460 reasonIfUnsupported.value());
Francis Murtagh7909c532021-01-28 14:25:15 +0000461}
462
/// Gather support query without a descriptor (legacy overload).
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, reasonIfUnsupported.value());
}

/// Gather support query with a GatherDescriptor (e.g. carrying the axis).
bool LayerSupportHandle::IsGatherSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output,
                                           const GatherDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsGatherSupported(input0, input1, output, descriptor, reasonIfUnsupported.value());
}
479
480bool LayerSupportHandle::IsGreaterSupported(const TensorInfo& input0,
481 const TensorInfo& input1,
482 const TensorInfo& ouput,
483 Optional<std::string&> reasonIfUnsupported)
484{
485 return m_LayerSupport->IsGreaterSupported(input0, input1, ouput, reasonIfUnsupported.value());
486}
487
// ---------------------------------------------------------------------------
// Further thin forwarding wrappers; see the note above the IsAbsSupported
// group. The Lstm/QLstm/QuantizedLstm/UnidirectionalSequenceLstm wrappers
// forward the Optional itself rather than .value().
// ---------------------------------------------------------------------------

bool LayerSupportHandle::IsInputSupported(const TensorInfo& input,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInputSupported(input, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsInstanceNormalizationSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const InstanceNormalizationDescriptor& descriptor,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsInstanceNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsL2NormalizationSupported(const TensorInfo& input,
                                                    const TensorInfo& output,
                                                    const L2NormalizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsL2NormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalBinarySupported(const TensorInfo& input0,
                                                  const TensorInfo& input1,
                                                  const TensorInfo& output,
                                                  const LogicalBinaryDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalBinarySupported(input0,
                                                    input1,
                                                    output,
                                                    descriptor,
                                                    reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogicalUnarySupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const ElementwiseUnaryDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogicalUnarySupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLogSoftmaxSupported(const TensorInfo& input,
                                               const TensorInfo& output,
                                               const LogSoftmaxDescriptor& descriptor,
                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLogSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsLstmSupported(const TensorInfo& input,
                                         const TensorInfo& outputStateIn,
                                         const TensorInfo& cellStateIn,
                                         const TensorInfo& scratchBuffer,
                                         const TensorInfo& outputStateOut,
                                         const TensorInfo& cellStateOut,
                                         const TensorInfo& output,
                                         const LstmDescriptor& descriptor,
                                         const LstmInputParamsInfo& paramsInfo,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsLstmSupported(input,
                                           outputStateIn,
                                           cellStateIn,
                                           scratchBuffer,
                                           outputStateOut,
                                           cellStateOut,
                                           output,
                                           descriptor,
                                           paramsInfo,
                                           reasonIfUnsupported);
}

bool LayerSupportHandle::IsMaximumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMaximumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMeanSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         const MeanDescriptor& descriptor,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMeanSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemCopySupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemCopySupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMemImportSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMemImportSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergeSupported(const TensorInfo& input0,
                                          const TensorInfo& input1,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergeSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMergerSupported(const std::vector<const TensorInfo*> inputs,
                                           const TensorInfo& output,
                                           const OriginsDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMergerSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMinimumSupported(const TensorInfo& input0,
                                            const TensorInfo& input1,
                                            const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMinimumSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsMultiplicationSupported(const TensorInfo& input0,
                                                   const TensorInfo& input1,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsMultiplicationSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsNormalizationSupported(const TensorInfo& input,
                                                  const TensorInfo& output,
                                                  const NormalizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsNormalizationSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsOutputSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsOutputSupported(output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPadSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const PadDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPadSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPermuteSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const PermuteDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPermuteSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPooling2dSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const Pooling2dDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPooling2dSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreCompiledSupported(const TensorInfo& input,
                                                const PreCompiledDescriptor& descriptor,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreCompiledSupported(input, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsPreluSupported(const TensorInfo& input,
                                          const TensorInfo& alpha,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsPreluSupported(input, alpha, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQuantizeSupported(const TensorInfo& input,
                                             const TensorInfo& output,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsQLstmSupported(const TensorInfo& input,
                                          const TensorInfo& previousOutputIn,
                                          const TensorInfo& previousCellStateIn,
                                          const TensorInfo& outputStateOut,
                                          const TensorInfo& cellStateOut,
                                          const TensorInfo& output,
                                          const QLstmDescriptor& descriptor,
                                          const LstmInputParamsInfo& paramsInfo,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQLstmSupported(input,
                                            previousOutputIn,
                                            previousCellStateIn,
                                            outputStateOut,
                                            cellStateOut,
                                            output,
                                            descriptor,
                                            paramsInfo,
                                            reasonIfUnsupported);
}

bool LayerSupportHandle::IsQuantizedLstmSupported(const TensorInfo& input,
                                                  const TensorInfo& previousCellStateIn,
                                                  const TensorInfo& previousOutputIn,
                                                  const TensorInfo& cellStateOut,
                                                  const TensorInfo& output,
                                                  const QuantizedLstmInputParamsInfo& paramsInfo,
                                                  Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsQuantizedLstmSupported(input,
                                                    previousCellStateIn,
                                                    previousOutputIn,
                                                    cellStateOut,
                                                    output,
                                                    paramsInfo,
                                                    reasonIfUnsupported);
}

bool LayerSupportHandle::IsRankSupported(const TensorInfo& input,
                                         const TensorInfo& output,
                                         Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRankSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReduceSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ReduceDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReduceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsReshapeSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const ReshapeDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsReshapeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeBilinearSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeBilinearSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsResizeSupported(const TensorInfo& input,
                                           const TensorInfo& output,
                                           const ResizeDescriptor& descriptor,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsResizeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsRsqrtSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsRsqrtSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsShapeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsShapeSupported(input, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSliceSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const SliceDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSoftmaxSupported(const TensorInfo& input,
                                            const TensorInfo& output,
                                            const SoftmaxDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSoftmaxSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToBatchNdSupported(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const SpaceToBatchNdDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToBatchNdSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSpaceToDepthSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const SpaceToDepthDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSpaceToDepthSupported(input, output, descriptor, reasonIfUnsupported.value());
}

/// Splitter overload without explicit output infos (legacy).
bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, descriptor, reasonIfUnsupported.value());
}

/// Splitter overload taking the output views explicitly.
bool LayerSupportHandle::IsSplitterSupported(const TensorInfo& input,
                                             const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
                                             const ViewsDescriptor& descriptor,
                                             Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSplitterSupported(input, outputs, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStackSupported(const std::vector<const TensorInfo*>& inputs,
                                          const TensorInfo& output,
                                          const StackDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStackSupported(inputs, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsStandInSupported(const std::vector<const TensorInfo*>& inputs,
                                            const std::vector<const TensorInfo*>& outputs,
                                            const StandInDescriptor& descriptor,
                                            Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStandInSupported(inputs, outputs, descriptor, reasonIfUnsupported.value());
}


bool LayerSupportHandle::IsStridedSliceSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const StridedSliceDescriptor& descriptor,
                                                 Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsStridedSliceSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSubtractionSupported(const TensorInfo& input0,
                                                const TensorInfo& input1,
                                                const TensorInfo& output,
                                                Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSubtractionSupported(input0, input1, output, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsSwitchSupported(const TensorInfo& input0,
                                           const TensorInfo& input1,
                                           const TensorInfo& output0,
                                           const TensorInfo& output1,
                                           Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsSwitchSupported(input0, input1, output0, output1, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeConvolution2dSupported(
    const TensorInfo& input,
    const TensorInfo& output,
    const TransposeConvolution2dDescriptor& descriptor,
    const TensorInfo& weights,
    const Optional<TensorInfo>& biases,
    Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeConvolution2dSupported(input,
                                                             output,
                                                             descriptor,
                                                             weights,
                                                             biases,
                                                             reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsTransposeSupported(const TensorInfo& input,
                                              const TensorInfo& output,
                                              const TransposeDescriptor& descriptor,
                                              Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsTransposeSupported(input, output, descriptor, reasonIfUnsupported.value());
}

bool LayerSupportHandle::IsUnidirectionalSequenceLstmSupported(const TensorInfo& input,
                                                               const TensorInfo& outputStateIn,
                                                               const TensorInfo& cellStateIn,
                                                               const TensorInfo& output,
                                                               const Optional<TensorInfo>& hiddenStateOutput,
                                                               const Optional<TensorInfo>& cellStateOutput,
                                                               const LstmDescriptor& descriptor,
                                                               const LstmInputParamsInfo& paramsInfo,
                                                               Optional<std::string&> reasonIfUnsupported)
{
    return m_LayerSupport->IsUnidirectionalSequenceLstmSupported(input,
                                                                 outputStateIn,
                                                                 cellStateIn,
                                                                 output,
                                                                 hiddenStateOutput,
                                                                 cellStateOutput,
                                                                 descriptor,
                                                                 paramsInfo,
                                                                 reasonIfUnsupported);
}
908
Francis Murtagh7909c532021-01-28 14:25:15 +0000909}