blob: 5cbdb6f356c5cdb15b548e82df455c8738ce12cc [file] [log] [blame]
Sadik Armagan3c24f432020-10-19 17:35:30 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include <armnn_delegate.hpp>
Sadik Armagan62483be2020-10-23 17:14:43 +01007
8#include "Activation.hpp"
9#include "ArgMinMax.hpp"
10#include "BatchSpace.hpp"
11#include "Comparison.hpp"
12#include "Convolution.hpp"
13#include "Control.hpp"
14#include "ElementwiseBinary.hpp"
15#include "ElementwiseUnary.hpp"
16#include "Fill.hpp"
17#include "FullyConnected.hpp"
18#include "Gather.hpp"
19#include "Lstm.hpp"
20#include "Normalization.hpp"
21#include "Pad.hpp"
22#include "Pooling.hpp"
23#include "Quantization.hpp"
24#include "Redefine.hpp"
25#include "Resize.hpp"
26#include "Round.hpp"
27#include "Slice.hpp"
28#include "Softmax.hpp"
29#include "SpaceDepth.hpp"
30#include "Transpose.hpp"
31
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>

#include <algorithm>
#include <sstream>
#include <string>
Sadik Armagan3c24f432020-10-19 17:35:30 +010037
38namespace armnnDelegate
39{
40
Sadik Armagan62483be2020-10-23 17:14:43 +010041DelegateOptions TfLiteArmnnDelegateOptionsDefault()
42{
43 DelegateOptions options(armnn::Compute::CpuRef);
44 return options;
45}
46
47TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
48{
49 auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
50 return armnnDelegate->GetDelegate();
51}
52
53void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
54{
55 if (tfLiteDelegate != nullptr)
56 {
57 delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
58 }
59}
60
61TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
62{
63 TfLiteIntArray* supportedOperators =
64 static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);
65
66 // ArmNN Delegate Registration
67 static const TfLiteRegistration kArmnnSubgraphRegistration = {
68 // ArmnnSubgraph Init
69 .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
70 const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);
71
72 return static_cast<void*>(ArmnnSubgraph::Create(
73 tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
74 },
75 // ArmnnSubgraph Free
76 .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
77 if (buffer != nullptr)
78 {
79 delete static_cast<ArmnnSubgraph*>(buffer);
80 }
81 },
82 // ArmnnSubgraph Prepare
83 .prepare = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
84 if (tfLiteNode->user_data == nullptr)
85 {
86 return kTfLiteError;
87 }
88
89 return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
90 },
91 // ArmnnSubgraph Invoke
92 .invoke = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
93 if (tfLiteNode->user_data == nullptr)
94 {
95 return kTfLiteError;
96 }
97
98 return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext, tfLiteNode);
99 },
100
101 .profiling_string = nullptr,
102 .builtin_code = kTfLiteBuiltinDelegate,
103 .custom_name = "TfLiteArmNnDelegate",
104 .version = 1,
105 };
106
107 const TfLiteStatus status =
108 tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
109 tfLiteContext, kArmnnSubgraphRegistration, supportedOperators, tfLiteDelegate);
110
111 TfLiteIntArrayFree(supportedOperators);
112 return status;
113
114}
115
Sadik Armagan3c24f432020-10-19 17:35:30 +0100116Delegate::Delegate(armnnDelegate::DelegateOptions options)
117 : m_Runtime(nullptr, nullptr),
118 m_Options(std::move(options))
119{
120 // Create ArmNN Runtime
121 armnn::IRuntime::CreationOptions runtimeOptions;
122 m_Runtime = armnn::IRuntime::Create(runtimeOptions);
123
124 std::vector<armnn::BackendId> backends;
125
126 if (m_Runtime)
127 {
128 const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
129 for (auto& backend : m_Options.GetBackends())
130 {
131 if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
132 {
133 TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
134 "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
135 }
136 else
137 {
138 backends.push_back(backend);
139 }
140 }
141 }
142
143 if (backends.empty())
144 {
145 // No known backend specified
146 throw armnn::InvalidArgumentException("TfLiteArmnnDelegate: No known backend specified.");
147 }
148 m_Options.SetBackends(backends);
149
150 TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnDelegate: Created TfLite ArmNN delegate.");
151}
152
Sadik Armagan62483be2020-10-23 17:14:43 +0100153TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteContext)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100154{
155 TfLiteIntArray* executionPlan = nullptr;
156 if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
157 {
158 TF_LITE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
159 return nullptr;
160 }
161
Sadik Armagan62483be2020-10-23 17:14:43 +0100162 // Delegate data with null network
163 DelegateData delegateData(m_Options.GetBackends());
Sadik Armagan3c24f432020-10-19 17:35:30 +0100164
165 TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
166 nodesToDelegate->size = 0;
167 for (int i = 0; i < executionPlan->size; ++i)
168 {
169 const int nodeIndex = executionPlan->data[i];
170
171 // If TfLite nodes can be delegated to ArmNN
172 TfLiteNode* tfLiteNode = nullptr;
173 TfLiteRegistration* tfLiteRegistration = nullptr;
174 if (tfLiteContext->GetNodeAndRegistration(
175 tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
176 {
177 TF_LITE_KERNEL_LOG(tfLiteContext,
178 "TfLiteArmnnDelegate: Unable to get node and registration for node %d.",
179 nodeIndex);
180 continue;
181 }
182
183 if (ArmnnSubgraph::VisitNode(
Sadik Armagan62483be2020-10-23 17:14:43 +0100184 delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100185 {
186 // node is not supported by ArmNN
187 continue;
188 }
189
190 nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
191 }
192
Sadik Armagan62483be2020-10-23 17:14:43 +0100193 std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100194 return nodesToDelegate;
195}
196
// Returns the TfLiteDelegate struct owned by this Delegate instance;
// the pointer remains valid for the lifetime of the Delegate.
TfLiteDelegate* Delegate::GetDelegate()
{
    return &m_Delegate;
}
201
Sadik Armagan62483be2020-10-23 17:14:43 +0100202TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
203 TfLiteContext* tfLiteContext,
204 const TfLiteIntArray* inputs,
205 std::vector<armnn::BindingPointInfo>& inputBindings)
206{
207 const size_t numInputs = inputs->size;
208 for (unsigned int i = 0; i < numInputs; ++i)
209 {
210 const int32_t tensorId = inputs->data[i];
211 const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
212
213 auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
214 armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);
215
216 auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
217 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
218 outputSlot.SetTensorInfo(tensorInfo);
219
220 // Store for creating connections
221 delegateData.m_OutputSlotForNode[tensorId] = &outputSlot;
222
223 // Do not create bindings for constant inputs
224 if (tensor.allocation_type != kTfLiteMmapRo)
225 {
226 inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
227 }
228 }
229 return kTfLiteOk;
230}
231
232TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
233 TfLiteContext* tfLiteContext,
234 const TfLiteIntArray* outputs,
235 std::vector<armnn::BindingPointInfo>& outputBindings)
236{
237 const size_t numOutputs = outputs->size;
238 for (unsigned int i = 0; i < numOutputs; ++i)
239 {
240 const int32_t tensorId = outputs->data[i];
241 const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
242
243 auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
244 armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
245
246 auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
247
248 ARMNN_ASSERT(delegateData.m_OutputSlotForNode[tensorId] != nullptr);
249 delegateData.m_OutputSlotForNode[tensorId]->Connect(layer->GetInputSlot(0));
250 outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
251 }
252
253 return kTfLiteOk;
254}
255
Sadik Armagan3c24f432020-10-19 17:35:30 +0100256ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
257 const TfLiteDelegateParams* parameters,
258 const Delegate* delegate)
259{
260 TfLiteIntArray* executionPlan;
261 if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
262 {
263 return nullptr;
264 }
265
Sadik Armagan62483be2020-10-23 17:14:43 +0100266 // Initialize DelegateData holds network and output slots information
267 DelegateData delegateData(delegate->m_Options.GetBackends());
268
269 // Build ArmNN Network
Sadik Armagan3c24f432020-10-19 17:35:30 +0100270 using NetworkOptions = std::vector<armnn::BackendOptions>;
271 armnn::NetworkOptions networkOptions = {};
272 armnn::NetworkId networkId;
Sadik Armagan62483be2020-10-23 17:14:43 +0100273 delegateData.m_Network = armnn::INetwork::Create(networkOptions);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100274
Sadik Armagan62483be2020-10-23 17:14:43 +0100275 delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(parameters->nodes_to_replace->size, nullptr);
276
277 std::vector<armnn::BindingPointInfo> inputBindings;
278 std::vector<armnn::BindingPointInfo> outputBindings;
279
280 // Add input layer
281 auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
282 if (status != kTfLiteOk)
283 {
284 throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Inputs to the network!");
285 }
286
287 // Parse TfLite delegate nodes to ArmNN
Sadik Armagan3c24f432020-10-19 17:35:30 +0100288 for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
289 {
290 const int nodeIndex = parameters->nodes_to_replace->data[i];
291
292 TfLiteNode* tfLiteNode = nullptr;
293 TfLiteRegistration* tfLiteRegistration = nullptr;
294 if (tfLiteContext->GetNodeAndRegistration(
295 tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
296 {
297 throw armnn::Exception("TfLiteArmnnDelegate: Unable to get node registration: " + nodeIndex);
298 }
299
Sadik Armagan62483be2020-10-23 17:14:43 +0100300 if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100301 {
302 throw armnn::Exception("TfLiteArmnnDelegate: Unable to parse node: " + nodeIndex);
303 }
304 }
305
Sadik Armagan62483be2020-10-23 17:14:43 +0100306 // Add Output layer
307 status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
308 if (status != kTfLiteOk)
309 {
310 throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Outputs to the network!");
311 }
312
313 // Optimize ArmNN network
314 armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
315 try
316 {
317
318 optNet = armnn::Optimize(*(delegateData.m_Network),
319 delegate->m_Options.GetBackends(),
320 delegate->m_Runtime->GetDeviceSpec());
321 }
322 catch (std::exception &ex)
323 {
324 std::stringstream exMessage;
325 exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from optimize.";
326 throw armnn::Exception(exMessage.str());
327 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100328 if (!optNet)
329 {
Sadik Armagan62483be2020-10-23 17:14:43 +0100330 // Optimize failed
Sadik Armagan3c24f432020-10-19 17:35:30 +0100331 throw armnn::Exception("TfLiteArmnnDelegate: Unable to optimize the network!");
332 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100333
334 try
335 {
336 // Load graph into runtime
337 auto loadingStatus = delegate->m_Runtime->LoadNetwork(networkId, std::move(optNet));
338 if (loadingStatus != armnn::Status::Success)
339 {
340 // Optimize failed
341 throw armnn::Exception("TfLiteArmnnDelegate: Network could not be loaded!");;
342 }
343 }
344 catch (std::exception& ex)
345 {
346 std::stringstream exMessage;
347 exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
348 throw armnn::Exception(exMessage.str());
349 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100350
351 // Create a new SubGraph with networkId and runtime
Sadik Armagan62483be2020-10-23 17:14:43 +0100352 return new ArmnnSubgraph(networkId, delegate->m_Runtime.get(), inputBindings, outputBindings);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100353}
354
// Called by TfLite before Invoke. The ArmNN network is already optimized and
// loaded into the runtime during Create, so there is nothing left to prepare.
TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
{
    return kTfLiteOk;
}
359
// Executes the pre-loaded ArmNN network for this subgraph. The ArmNN input and
// output tensors alias the TfLite tensor buffers directly (no data copies).
TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
{
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (auto inputIdx : tflite::TfLiteIntArrayView(tfLiteNode->inputs))
    {
        TfLiteTensor* tensor = &tfLiteContext->tensors[inputIdx];
        // Constant (kTfLiteMmapRo) tensors have no binding (AddInputLayer skips
        // them with the same filter), so inputIndex only advances for
        // non-constant tensors to stay in step with m_InputBindings.
        if (tensor->allocation_type != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            const armnn::ConstTensor inputTensor(inputBinding.second, tensor->data.data);
            // inputIdx (the TfLite tensor id) is used as the binding id here;
            // this matches AddInputLayer, which created each input layer with
            // the tensor id as its LayerBindingId.
            inputTensors.emplace_back(inputIdx, inputTensor);

            ++inputIndex;
        }
    }

    // Prepare outputs
    armnn::OutputTensors outputTensors;
    size_t outputIndex = 0;
    for (auto outputIdx : tflite::TfLiteIntArrayView(tfLiteNode->outputs))
    {
        // Outputs are never constant, so the binding list maps 1:1 onto the
        // node's output order.
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
        TfLiteTensor* tensor = &tfLiteContext->tensors[outputIdx];
        const armnn::Tensor outputTensor(outputBinding.second, tensor->data.data);
        outputTensors.emplace_back(outputIdx, outputTensor);

        ++outputIndex;
    }

    // Run graph
    auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
    return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
}
395
Sadik Armagan62483be2020-10-23 17:14:43 +0100396TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
Sadik Armagan3c24f432020-10-19 17:35:30 +0100397 TfLiteContext* tfLiteContext,
398 TfLiteRegistration* tfLiteRegistration,
399 TfLiteNode* tfLiteNode,
400 int nodeIndex)
401{
Sadik Armagan62483be2020-10-23 17:14:43 +0100402 switch (tfLiteRegistration->builtin_code)
403 {
404 case kTfLiteBuiltinAbs:
405 return VisitElementwiseUnaryOperator(delegateData,
406 tfLiteContext,
407 tfLiteNode,
408 nodeIndex,
409 armnn::UnaryOperation::Abs);
410 case kTfLiteBuiltinAdd:
411 return VisitElementwiseBinaryOperator(delegateData,
412 tfLiteContext,
413 tfLiteNode,
414 nodeIndex,
415 kTfLiteBuiltinAdd);
416 case kTfLiteBuiltinArgMax:
417 return VisitArgMinMaxOperator(delegateData,
418 tfLiteContext,
419 tfLiteNode,
420 nodeIndex,
421 kTfLiteBuiltinArgMax);
422 case kTfLiteBuiltinArgMin:
423 return VisitArgMinMaxOperator(delegateData,
424 tfLiteContext,
425 tfLiteNode,
426 nodeIndex,
427 kTfLiteBuiltinArgMin);
428 case kTfLiteBuiltinAveragePool2d:
429 return VisitPoolingOperator(delegateData,
430 tfLiteContext,
431 tfLiteNode,
432 nodeIndex,
433 kTfLiteBuiltinAveragePool2d);
434 case kTfLiteBuiltinBatchToSpaceNd:
435 return VisitBatchToSpaceNdOperator(delegateData,
436 tfLiteContext,
437 tfLiteNode,
438 nodeIndex,
439 kTfLiteBuiltinBatchToSpaceNd);
440 case kTfLiteBuiltinConcatenation:
441 return VisitControlOperator(delegateData,
442 tfLiteContext,
443 tfLiteNode,
444 nodeIndex,
445 kTfLiteBuiltinConcatenation);
446 case kTfLiteBuiltinConv2d:
447 return VisitConvolutionOperator(delegateData,
448 tfLiteContext,
449 tfLiteNode,
450 nodeIndex,
451 kTfLiteBuiltinConv2d);
452 case kTfLiteBuiltinDepthToSpace:
453 return VisitDepthToSpaceOperator(delegateData,
454 tfLiteContext,
455 tfLiteNode,
456 nodeIndex,
457 kTfLiteBuiltinDepthToSpace);
458 case kTfLiteBuiltinDepthwiseConv2d:
459 return VisitConvolutionOperator(delegateData,
460 tfLiteContext,
461 tfLiteNode,
462 nodeIndex,
463 kTfLiteBuiltinDepthwiseConv2d);
464 case kTfLiteBuiltinDequantize:
465 return VisitDequantizeOperator(delegateData,
466 tfLiteContext,
467 tfLiteNode,
468 nodeIndex,
469 kTfLiteBuiltinDequantize);
470 case kTfLiteBuiltinDiv:
471 return VisitElementwiseBinaryOperator(delegateData,
472 tfLiteContext,
473 tfLiteNode,
474 nodeIndex,
475 kTfLiteBuiltinDiv);
476 case kTfLiteBuiltinElu:
477 return VisitActivationOperator(delegateData,
478 tfLiteContext,
479 tfLiteNode,
480 nodeIndex,
481 kTfLiteBuiltinElu);
482 case kTfLiteBuiltinEqual:
483 return VisitComparisonOperator(delegateData,
484 tfLiteContext,
485 tfLiteNode,
486 nodeIndex,
487 kTfLiteBuiltinEqual);
488 case kTfLiteBuiltinExp:
489 return VisitElementwiseUnaryOperator(delegateData,
490 tfLiteContext,
491 tfLiteNode,
492 nodeIndex,
493 armnn::UnaryOperation::Exp);
494 case kTfLiteBuiltinExpandDims:
495 return VisitExpandDimsOperator(delegateData,
496 tfLiteContext,
497 tfLiteNode,
498 nodeIndex,
499 kTfLiteBuiltinExpandDims);
500 case kTfLiteBuiltinFill:
501 return VisitFillOperator(delegateData,
502 tfLiteContext,
503 tfLiteNode,
504 nodeIndex,
505 kTfLiteBuiltinFill);
506 case kTfLiteBuiltinFloor:
507 return VisitFloorOperator(delegateData,
508 tfLiteContext,
509 tfLiteNode,
510 nodeIndex,
511 kTfLiteBuiltinFloor);
512 case kTfLiteBuiltinFullyConnected:
513 return VisitFullyConnectedOperator(delegateData,
514 tfLiteContext,
515 tfLiteNode,
516 nodeIndex,
517 kTfLiteBuiltinFullyConnected);
518 case kTfLiteBuiltinGather:
519 return VisitGatherOperator(delegateData,
520 tfLiteContext,
521 tfLiteNode,
522 nodeIndex,
523 kTfLiteBuiltinGather);
524 case kTfLiteBuiltinGatherNd:
525 return VisitGatherOperator(delegateData,
526 tfLiteContext,
527 tfLiteNode,
528 nodeIndex,
529 kTfLiteBuiltinGatherNd);
530 case kTfLiteBuiltinGreater:
531 return VisitComparisonOperator(delegateData,
532 tfLiteContext,
533 tfLiteNode,
534 nodeIndex,
535 kTfLiteBuiltinGreater);
536 case kTfLiteBuiltinGreaterEqual:
537 return VisitComparisonOperator(delegateData,
538 tfLiteContext,
539 tfLiteNode,
540 nodeIndex,
541 kTfLiteBuiltinGreaterEqual);
542 case kTfLiteBuiltinHardSwish:
543 return VisitActivationOperator(delegateData,
544 tfLiteContext,
545 tfLiteNode,
546 nodeIndex,
547 kTfLiteBuiltinHardSwish);
548 case kTfLiteBuiltinL2Normalization:
549 return VisitNormalizationOperator(delegateData,
550 tfLiteContext,
551 tfLiteNode,
552 nodeIndex,
553 kTfLiteBuiltinL2Normalization);
554 case kTfLiteBuiltinL2Pool2d:
555 return VisitPoolingOperator(delegateData,
556 tfLiteContext,
557 tfLiteNode,
558 nodeIndex,
559 kTfLiteBuiltinL2Pool2d);
560 case kTfLiteBuiltinLess:
561 return VisitComparisonOperator(delegateData,
562 tfLiteContext,
563 tfLiteNode,
564 nodeIndex,
565 kTfLiteBuiltinLess);
566 case kTfLiteBuiltinLessEqual:
567 return VisitComparisonOperator(delegateData,
568 tfLiteContext,
569 tfLiteNode,
570 nodeIndex,
571 kTfLiteBuiltinLessEqual);
572 case kTfLiteBuiltinLocalResponseNormalization:
573 return VisitNormalizationOperator(delegateData,
574 tfLiteContext,
575 tfLiteNode,
576 nodeIndex,
577 kTfLiteBuiltinLocalResponseNormalization);
578 case kTfLiteBuiltinLogistic:
579 return VisitActivationOperator(delegateData,
580 tfLiteContext,
581 tfLiteNode,
582 nodeIndex,
583 kTfLiteBuiltinLogistic);
584 case kTfLiteBuiltinLogSoftmax:
585 return VisitSoftmaxOperator(delegateData,
586 tfLiteContext,
587 tfLiteNode,
588 nodeIndex,
589 kTfLiteBuiltinLogSoftmax);
590 case kTfLiteBuiltinLstm:
591 return VisitLstmOperator(delegateData,
592 tfLiteContext,
593 tfLiteNode,
594 nodeIndex,
595 kTfLiteBuiltinLstm);
596 case kTfLiteBuiltinMaxPool2d:
597 return VisitPoolingOperator(delegateData,
598 tfLiteContext,
599 tfLiteNode,
600 nodeIndex,
601 kTfLiteBuiltinMaxPool2d);
602 case kTfLiteBuiltinMaximum:
603 return VisitElementwiseBinaryOperator(delegateData,
604 tfLiteContext,
605 tfLiteNode,
606 nodeIndex,
607 kTfLiteBuiltinMaximum);
608 case kTfLiteBuiltinMean:
609 return VisitControlOperator(delegateData,
610 tfLiteContext,
611 tfLiteNode,
612 nodeIndex,
613 kTfLiteBuiltinMean);
614 case kTfLiteBuiltinMinimum:
615 return VisitElementwiseBinaryOperator(delegateData,
616 tfLiteContext,
617 tfLiteNode,
618 nodeIndex,
619 kTfLiteBuiltinMinimum);
620 case kTfLiteBuiltinMul:
621 return VisitElementwiseBinaryOperator(delegateData,
622 tfLiteContext,
623 tfLiteNode,
624 nodeIndex,
625 kTfLiteBuiltinMul);
626 case kTfLiteBuiltinNeg:
627 return VisitElementwiseUnaryOperator(delegateData,
628 tfLiteContext,
629 tfLiteNode,
630 nodeIndex,
631 armnn::UnaryOperation::Neg);
632 case kTfLiteBuiltinNotEqual:
633 return VisitComparisonOperator(delegateData,
634 tfLiteContext,
635 tfLiteNode,
636 nodeIndex,
637 kTfLiteBuiltinNotEqual);
638 case kTfLiteBuiltinPad:
639 return VisitPadOperator(delegateData,
640 tfLiteContext,
641 tfLiteNode,
642 nodeIndex,
643 kTfLiteBuiltinPad);
644 case kTfLiteBuiltinPadv2:
645 return VisitPadOperator(delegateData,
646 tfLiteContext,
647 tfLiteNode,
648 nodeIndex,
649 kTfLiteBuiltinPadv2);
650 case kTfLiteBuiltinPrelu:
651 return VisitActivationOperator(delegateData,
652 tfLiteContext,
653 tfLiteNode,
654 nodeIndex,
655 kTfLiteBuiltinPrelu);
656 case kTfLiteBuiltinQuantize:
657 return VisitQuantizeOperator(delegateData,
658 tfLiteContext,
659 tfLiteNode,
660 nodeIndex,
661 kTfLiteBuiltinQuantize);
662 case kTfLiteBuiltinRank:
663 return VisitControlOperator(delegateData,
664 tfLiteContext,
665 tfLiteNode,
666 nodeIndex,
667 kTfLiteBuiltinRank);
668 case kTfLiteBuiltinRelu:
669 return VisitActivationOperator(delegateData,
670 tfLiteContext,
671 tfLiteNode,
672 nodeIndex,
673 kTfLiteBuiltinRelu);
674 case kTfLiteBuiltinReluN1To1:
675 return VisitActivationOperator(delegateData,
676 tfLiteContext,
677 tfLiteNode,
678 nodeIndex,
679 kTfLiteBuiltinReluN1To1);
680 case kTfLiteBuiltinRelu6:
681 return VisitActivationOperator(delegateData,
682 tfLiteContext,
683 tfLiteNode,
684 nodeIndex,
685 kTfLiteBuiltinRelu6);
686 case kTfLiteBuiltinReshape:
687 return VisitReshapeOperator(delegateData,
688 tfLiteContext,
689 tfLiteNode,
690 nodeIndex,
691 kTfLiteBuiltinReshape);
692 case kTfLiteBuiltinResizeBilinear:
693 return VisitResizeOperator(delegateData,
694 tfLiteContext,
695 tfLiteNode,
696 nodeIndex,
697 kTfLiteBuiltinResizeBilinear);
698 case kTfLiteBuiltinResizeNearestNeighbor:
699 return VisitResizeOperator(delegateData,
700 tfLiteContext,
701 tfLiteNode,
702 nodeIndex,
703 kTfLiteBuiltinResizeNearestNeighbor);
704 case kTfLiteBuiltinRsqrt:
705 return VisitElementwiseUnaryOperator(delegateData,
706 tfLiteContext,
707 tfLiteNode,
708 nodeIndex,
709 armnn::UnaryOperation::Rsqrt);
710 case kTfLiteBuiltinSqrt:
711 return VisitElementwiseUnaryOperator(delegateData,
712 tfLiteContext,
713 tfLiteNode,
714 nodeIndex,
715 armnn::UnaryOperation::Sqrt);
716 case kTfLiteBuiltinSqueeze:
717 return VisitSqueezeOperator(delegateData,
718 tfLiteContext,
719 tfLiteNode,
720 nodeIndex,
721 kTfLiteBuiltinSqueeze);
722 case kTfLiteBuiltinStridedSlice:
723 return VisitSliceOperator(delegateData,
724 tfLiteContext,
725 tfLiteNode,
726 nodeIndex,
727 kTfLiteBuiltinStridedSlice);
728 case kTfLiteBuiltinTranspose:
729 return VisitTransposeOperator(delegateData,
730 tfLiteContext,
731 tfLiteNode,
732 nodeIndex,
733 kTfLiteBuiltinTranspose);
734 case kTfLiteBuiltinTransposeConv:
735 return VisitConvolutionOperator(delegateData,
736 tfLiteContext,
737 tfLiteNode,
738 nodeIndex,
739 kTfLiteBuiltinTransposeConv);
740 case kTfLiteBuiltinSoftmax:
741 return VisitSoftmaxOperator(delegateData,
742 tfLiteContext,
743 tfLiteNode,
744 nodeIndex,
745 kTfLiteBuiltinSoftmax);
746 case kTfLiteBuiltinSpaceToBatchNd:
747 return VisitSpaceToBatchNdOperator(delegateData,
748 tfLiteContext,
749 tfLiteNode,
750 nodeIndex,
751 kTfLiteBuiltinSpaceToBatchNd);
752 case kTfLiteBuiltinSpaceToDepth:
753 return VisitSpaceToDepthOperator(delegateData,
754 tfLiteContext,
755 tfLiteNode,
756 nodeIndex,
757 kTfLiteBuiltinSpaceToDepth);
758 case kTfLiteBuiltinSub:
759 return VisitElementwiseBinaryOperator(delegateData,
760 tfLiteContext,
761 tfLiteNode,
762 nodeIndex,
763 kTfLiteBuiltinSub);
764 case kTfLiteBuiltinTanh:
765 return VisitActivationOperator(delegateData,
766 tfLiteContext,
767 tfLiteNode,
768 nodeIndex,
769 kTfLiteBuiltinTanh);
770 default:
771 return kTfLiteError;
772 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100773}
774
775} // armnnDelegate namespace