blob: 3380c84d0b7c02ba0698328b868c33be360f26cf [file] [log] [blame]
Sadik Armagan3c24f432020-10-19 17:35:30 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include <armnn_delegate.hpp>
Sadik Armagan62483be2020-10-23 17:14:43 +01007
8#include "Activation.hpp"
9#include "ArgMinMax.hpp"
10#include "BatchSpace.hpp"
11#include "Comparison.hpp"
12#include "Convolution.hpp"
13#include "Control.hpp"
14#include "ElementwiseBinary.hpp"
15#include "ElementwiseUnary.hpp"
16#include "Fill.hpp"
17#include "FullyConnected.hpp"
18#include "Gather.hpp"
19#include "Lstm.hpp"
20#include "Normalization.hpp"
21#include "Pad.hpp"
22#include "Pooling.hpp"
23#include "Quantization.hpp"
24#include "Redefine.hpp"
25#include "Resize.hpp"
26#include "Round.hpp"
27#include "Slice.hpp"
28#include "Softmax.hpp"
29#include "SpaceDepth.hpp"
30#include "Transpose.hpp"
31
32#include <flatbuffers/flatbuffers.h>
33#include <tensorflow/lite/context_util.h>
34
#include <algorithm>
#include <sstream>
#include <string>
Sadik Armagan3c24f432020-10-19 17:35:30 +010037
38namespace armnnDelegate
39{
40
Sadik Armagan62483be2020-10-23 17:14:43 +010041DelegateOptions TfLiteArmnnDelegateOptionsDefault()
42{
43 DelegateOptions options(armnn::Compute::CpuRef);
44 return options;
45}
46
47TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
48{
49 auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
50 return armnnDelegate->GetDelegate();
51}
52
53void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
54{
55 if (tfLiteDelegate != nullptr)
56 {
57 delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
58 }
59}
60
// Entry point invoked by the TfLite interpreter when the delegate is applied.
// Asks the Delegate which nodes it can handle, registers a kernel whose
// init/free/prepare/invoke callbacks forward to ArmnnSubgraph, and replaces the
// supported node subsets with that kernel.
TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
{
    // Owned TfLiteIntArray; freed below after registration.
    TfLiteIntArray* supportedOperators =
        static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);

    // ArmNN Delegate Registration
    // NOTE(review): designated initializers on a C++ struct are a compiler
    // extension pre-C++20 — fields not listed are zero-initialized.
    static const TfLiteRegistration kArmnnSubgraphRegistration = {
        // ArmnnSubgraph Init: 'buffer' carries the TfLiteDelegateParams for this partition.
        .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
            const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);

            return static_cast<void*>(ArmnnSubgraph::Create(
                tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
        },
        // ArmnnSubgraph Free: 'buffer' is the ArmnnSubgraph* produced by init.
        .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        },
        // ArmnnSubgraph Prepare
        .prepare = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
            if (tfLiteNode->user_data == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
        },
        // ArmnnSubgraph Invoke
        .invoke = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
            if (tfLiteNode->user_data == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext, tfLiteNode);
        },

        .profiling_string = nullptr,
        .builtin_code = kTfLiteBuiltinDelegate,
        .custom_name = "TfLiteArmNnDelegate",
        .version = 1,
    };

    const TfLiteStatus status =
        tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
            tfLiteContext, kArmnnSubgraphRegistration, supportedOperators, tfLiteDelegate);

    // The context takes its own copy of the node list; release ours.
    TfLiteIntArrayFree(supportedOperators);
    return status;

}
114
Sadik Armagan3c24f432020-10-19 17:35:30 +0100115Delegate::Delegate(armnnDelegate::DelegateOptions options)
116 : m_Runtime(nullptr, nullptr),
117 m_Options(std::move(options))
118{
119 // Create ArmNN Runtime
120 armnn::IRuntime::CreationOptions runtimeOptions;
Sadik Armagan4189cc52020-11-11 18:01:48 +0000121
122 auto backendOptions = m_Options.GetBackendOptions();
123 if (!backendOptions.empty())
124 {
125 runtimeOptions.m_BackendOptions = backendOptions;
126 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100127 m_Runtime = armnn::IRuntime::Create(runtimeOptions);
128
129 std::vector<armnn::BackendId> backends;
Sadik Armagan3c24f432020-10-19 17:35:30 +0100130 if (m_Runtime)
131 {
132 const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
133 for (auto& backend : m_Options.GetBackends())
134 {
135 if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
136 {
Sadik Armagan0534e032020-10-27 17:30:18 +0000137 TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
Sadik Armagan3c24f432020-10-19 17:35:30 +0100138 "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
139 }
140 else
141 {
142 backends.push_back(backend);
143 }
144 }
145 }
146
147 if (backends.empty())
148 {
149 // No known backend specified
150 throw armnn::InvalidArgumentException("TfLiteArmnnDelegate: No known backend specified.");
151 }
152 m_Options.SetBackends(backends);
153
154 TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnDelegate: Created TfLite ArmNN delegate.");
155}
156
Sadik Armagan62483be2020-10-23 17:14:43 +0100157TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteContext)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100158{
159 TfLiteIntArray* executionPlan = nullptr;
160 if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
161 {
162 TF_LITE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
163 return nullptr;
164 }
165
Sadik Armagan62483be2020-10-23 17:14:43 +0100166 // Delegate data with null network
167 DelegateData delegateData(m_Options.GetBackends());
Sadik Armagan3c24f432020-10-19 17:35:30 +0100168
169 TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
170 nodesToDelegate->size = 0;
171 for (int i = 0; i < executionPlan->size; ++i)
172 {
173 const int nodeIndex = executionPlan->data[i];
174
175 // If TfLite nodes can be delegated to ArmNN
176 TfLiteNode* tfLiteNode = nullptr;
177 TfLiteRegistration* tfLiteRegistration = nullptr;
178 if (tfLiteContext->GetNodeAndRegistration(
179 tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
180 {
181 TF_LITE_KERNEL_LOG(tfLiteContext,
182 "TfLiteArmnnDelegate: Unable to get node and registration for node %d.",
183 nodeIndex);
184 continue;
185 }
186
187 if (ArmnnSubgraph::VisitNode(
Sadik Armagan62483be2020-10-23 17:14:43 +0100188 delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100189 {
190 // node is not supported by ArmNN
191 continue;
192 }
193
194 nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
195 }
196
Sadik Armagan62483be2020-10-23 17:14:43 +0100197 std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100198 return nodesToDelegate;
199}
200
201TfLiteDelegate* Delegate::GetDelegate()
202{
203 return &m_Delegate;
204}
205
Sadik Armagan62483be2020-10-23 17:14:43 +0100206TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
207 TfLiteContext* tfLiteContext,
208 const TfLiteIntArray* inputs,
209 std::vector<armnn::BindingPointInfo>& inputBindings)
210{
211 const size_t numInputs = inputs->size;
212 for (unsigned int i = 0; i < numInputs; ++i)
213 {
214 const int32_t tensorId = inputs->data[i];
215 const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
Sadik Armagan6e36a642020-11-10 21:18:41 +0000216 // Do not create bindings for constant inputs
217 if (tensor.allocation_type == kTfLiteMmapRo)
218 {
219 continue;
220 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100221
222 auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
223 armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);
224
225 auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
226 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
227 outputSlot.SetTensorInfo(tensorInfo);
228
229 // Store for creating connections
230 delegateData.m_OutputSlotForNode[tensorId] = &outputSlot;
231
Sadik Armagan6e36a642020-11-10 21:18:41 +0000232 inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
Sadik Armagan62483be2020-10-23 17:14:43 +0100233 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000234
Sadik Armagan62483be2020-10-23 17:14:43 +0100235 return kTfLiteOk;
236}
237
238TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
239 TfLiteContext* tfLiteContext,
240 const TfLiteIntArray* outputs,
241 std::vector<armnn::BindingPointInfo>& outputBindings)
242{
243 const size_t numOutputs = outputs->size;
244 for (unsigned int i = 0; i < numOutputs; ++i)
245 {
246 const int32_t tensorId = outputs->data[i];
247 const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
248
249 auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
250 armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
251
252 auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100253 ARMNN_ASSERT(delegateData.m_OutputSlotForNode[tensorId] != nullptr);
254 delegateData.m_OutputSlotForNode[tensorId]->Connect(layer->GetInputSlot(0));
255 outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
256 }
257
258 return kTfLiteOk;
259}
260
Sadik Armagan3c24f432020-10-19 17:35:30 +0100261ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
262 const TfLiteDelegateParams* parameters,
263 const Delegate* delegate)
264{
265 TfLiteIntArray* executionPlan;
266 if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
267 {
268 return nullptr;
269 }
270
Sadik Armagan62483be2020-10-23 17:14:43 +0100271 // Initialize DelegateData holds network and output slots information
272 DelegateData delegateData(delegate->m_Options.GetBackends());
273
274 // Build ArmNN Network
Sadik Armagan3c24f432020-10-19 17:35:30 +0100275 using NetworkOptions = std::vector<armnn::BackendOptions>;
276 armnn::NetworkOptions networkOptions = {};
277 armnn::NetworkId networkId;
Sadik Armagan62483be2020-10-23 17:14:43 +0100278 delegateData.m_Network = armnn::INetwork::Create(networkOptions);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100279
Sadik Armagan6e36a642020-11-10 21:18:41 +0000280 delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(tfLiteContext->tensors_size, nullptr);
281
Sadik Armagan62483be2020-10-23 17:14:43 +0100282
283 std::vector<armnn::BindingPointInfo> inputBindings;
284 std::vector<armnn::BindingPointInfo> outputBindings;
285
286 // Add input layer
287 auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
288 if (status != kTfLiteOk)
289 {
290 throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Inputs to the network!");
291 }
292
293 // Parse TfLite delegate nodes to ArmNN
Sadik Armagan3c24f432020-10-19 17:35:30 +0100294 for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
295 {
296 const int nodeIndex = parameters->nodes_to_replace->data[i];
297
298 TfLiteNode* tfLiteNode = nullptr;
299 TfLiteRegistration* tfLiteRegistration = nullptr;
300 if (tfLiteContext->GetNodeAndRegistration(
301 tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
302 {
303 throw armnn::Exception("TfLiteArmnnDelegate: Unable to get node registration: " + nodeIndex);
304 }
305
Sadik Armagan62483be2020-10-23 17:14:43 +0100306 if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100307 {
308 throw armnn::Exception("TfLiteArmnnDelegate: Unable to parse node: " + nodeIndex);
309 }
310 }
311
Sadik Armagan62483be2020-10-23 17:14:43 +0100312 // Add Output layer
313 status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
314 if (status != kTfLiteOk)
315 {
316 throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Outputs to the network!");
317 }
318
319 // Optimize ArmNN network
320 armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
321 try
322 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000323 optNet = armnn::Optimize(*(delegateData.m_Network.get()),
Sadik Armagan62483be2020-10-23 17:14:43 +0100324 delegate->m_Options.GetBackends(),
325 delegate->m_Runtime->GetDeviceSpec());
326 }
327 catch (std::exception &ex)
328 {
329 std::stringstream exMessage;
330 exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from optimize.";
331 throw armnn::Exception(exMessage.str());
332 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100333 if (!optNet)
334 {
Sadik Armagan62483be2020-10-23 17:14:43 +0100335 // Optimize failed
Sadik Armagan3c24f432020-10-19 17:35:30 +0100336 throw armnn::Exception("TfLiteArmnnDelegate: Unable to optimize the network!");
337 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100338
339 try
340 {
341 // Load graph into runtime
342 auto loadingStatus = delegate->m_Runtime->LoadNetwork(networkId, std::move(optNet));
343 if (loadingStatus != armnn::Status::Success)
344 {
345 // Optimize failed
346 throw armnn::Exception("TfLiteArmnnDelegate: Network could not be loaded!");;
347 }
348 }
349 catch (std::exception& ex)
350 {
351 std::stringstream exMessage;
352 exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
353 throw armnn::Exception(exMessage.str());
354 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100355
356 // Create a new SubGraph with networkId and runtime
Sadik Armagan62483be2020-10-23 17:14:43 +0100357 return new ArmnnSubgraph(networkId, delegate->m_Runtime.get(), inputBindings, outputBindings);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100358}
359
// TfLite 'prepare' hook for the delegate kernel. All real preparation
// (network build, optimization, loading) already happened in
// ArmnnSubgraph::Create, so there is nothing left to do here.
TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
{
    return kTfLiteOk;
}
364
Sadik Armagan62483be2020-10-23 17:14:43 +0100365TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
Sadik Armagan3c24f432020-10-19 17:35:30 +0100366{
Sadik Armagan62483be2020-10-23 17:14:43 +0100367 // Prepare inputs
368 armnn::InputTensors inputTensors;
369 size_t inputIndex = 0;
370 for (auto inputIdx : tflite::TfLiteIntArrayView(tfLiteNode->inputs))
371 {
372 TfLiteTensor* tensor = &tfLiteContext->tensors[inputIdx];
373 if (tensor->allocation_type != kTfLiteMmapRo)
374 {
375 const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
376 const armnn::ConstTensor inputTensor(inputBinding.second, tensor->data.data);
377 inputTensors.emplace_back(inputIdx, inputTensor);
Sadik Armagan3c24f432020-10-19 17:35:30 +0100378
Sadik Armagan62483be2020-10-23 17:14:43 +0100379 ++inputIndex;
380 }
381 }
382
383 // Prepare outputs
384 armnn::OutputTensors outputTensors;
385 size_t outputIndex = 0;
386 for (auto outputIdx : tflite::TfLiteIntArrayView(tfLiteNode->outputs))
387 {
388 const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
389 TfLiteTensor* tensor = &tfLiteContext->tensors[outputIdx];
390 const armnn::Tensor outputTensor(outputBinding.second, tensor->data.data);
391 outputTensors.emplace_back(outputIdx, outputTensor);
392
393 ++outputIndex;
394 }
395
396 // Run graph
397 auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
398 return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
Sadik Armagan3c24f432020-10-19 17:35:30 +0100399}
400
Sadik Armagan62483be2020-10-23 17:14:43 +0100401TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
Sadik Armagan3c24f432020-10-19 17:35:30 +0100402 TfLiteContext* tfLiteContext,
403 TfLiteRegistration* tfLiteRegistration,
404 TfLiteNode* tfLiteNode,
405 int nodeIndex)
406{
Sadik Armagan62483be2020-10-23 17:14:43 +0100407 switch (tfLiteRegistration->builtin_code)
408 {
409 case kTfLiteBuiltinAbs:
410 return VisitElementwiseUnaryOperator(delegateData,
411 tfLiteContext,
412 tfLiteNode,
413 nodeIndex,
414 armnn::UnaryOperation::Abs);
415 case kTfLiteBuiltinAdd:
416 return VisitElementwiseBinaryOperator(delegateData,
417 tfLiteContext,
418 tfLiteNode,
419 nodeIndex,
420 kTfLiteBuiltinAdd);
421 case kTfLiteBuiltinArgMax:
422 return VisitArgMinMaxOperator(delegateData,
423 tfLiteContext,
424 tfLiteNode,
425 nodeIndex,
426 kTfLiteBuiltinArgMax);
427 case kTfLiteBuiltinArgMin:
428 return VisitArgMinMaxOperator(delegateData,
429 tfLiteContext,
430 tfLiteNode,
431 nodeIndex,
432 kTfLiteBuiltinArgMin);
433 case kTfLiteBuiltinAveragePool2d:
434 return VisitPoolingOperator(delegateData,
435 tfLiteContext,
436 tfLiteNode,
437 nodeIndex,
438 kTfLiteBuiltinAveragePool2d);
439 case kTfLiteBuiltinBatchToSpaceNd:
440 return VisitBatchToSpaceNdOperator(delegateData,
441 tfLiteContext,
442 tfLiteNode,
443 nodeIndex,
444 kTfLiteBuiltinBatchToSpaceNd);
445 case kTfLiteBuiltinConcatenation:
446 return VisitControlOperator(delegateData,
447 tfLiteContext,
448 tfLiteNode,
449 nodeIndex,
450 kTfLiteBuiltinConcatenation);
451 case kTfLiteBuiltinConv2d:
452 return VisitConvolutionOperator(delegateData,
453 tfLiteContext,
454 tfLiteNode,
455 nodeIndex,
456 kTfLiteBuiltinConv2d);
457 case kTfLiteBuiltinDepthToSpace:
458 return VisitDepthToSpaceOperator(delegateData,
459 tfLiteContext,
460 tfLiteNode,
461 nodeIndex,
462 kTfLiteBuiltinDepthToSpace);
463 case kTfLiteBuiltinDepthwiseConv2d:
464 return VisitConvolutionOperator(delegateData,
465 tfLiteContext,
466 tfLiteNode,
467 nodeIndex,
468 kTfLiteBuiltinDepthwiseConv2d);
469 case kTfLiteBuiltinDequantize:
470 return VisitDequantizeOperator(delegateData,
471 tfLiteContext,
472 tfLiteNode,
473 nodeIndex,
474 kTfLiteBuiltinDequantize);
475 case kTfLiteBuiltinDiv:
476 return VisitElementwiseBinaryOperator(delegateData,
477 tfLiteContext,
478 tfLiteNode,
479 nodeIndex,
480 kTfLiteBuiltinDiv);
481 case kTfLiteBuiltinElu:
482 return VisitActivationOperator(delegateData,
483 tfLiteContext,
484 tfLiteNode,
485 nodeIndex,
486 kTfLiteBuiltinElu);
487 case kTfLiteBuiltinEqual:
488 return VisitComparisonOperator(delegateData,
489 tfLiteContext,
490 tfLiteNode,
491 nodeIndex,
492 kTfLiteBuiltinEqual);
493 case kTfLiteBuiltinExp:
494 return VisitElementwiseUnaryOperator(delegateData,
495 tfLiteContext,
496 tfLiteNode,
497 nodeIndex,
498 armnn::UnaryOperation::Exp);
499 case kTfLiteBuiltinExpandDims:
500 return VisitExpandDimsOperator(delegateData,
501 tfLiteContext,
502 tfLiteNode,
503 nodeIndex,
504 kTfLiteBuiltinExpandDims);
505 case kTfLiteBuiltinFill:
506 return VisitFillOperator(delegateData,
507 tfLiteContext,
508 tfLiteNode,
509 nodeIndex,
510 kTfLiteBuiltinFill);
511 case kTfLiteBuiltinFloor:
512 return VisitFloorOperator(delegateData,
513 tfLiteContext,
514 tfLiteNode,
515 nodeIndex,
516 kTfLiteBuiltinFloor);
517 case kTfLiteBuiltinFullyConnected:
518 return VisitFullyConnectedOperator(delegateData,
519 tfLiteContext,
520 tfLiteNode,
521 nodeIndex,
522 kTfLiteBuiltinFullyConnected);
523 case kTfLiteBuiltinGather:
524 return VisitGatherOperator(delegateData,
525 tfLiteContext,
526 tfLiteNode,
527 nodeIndex,
528 kTfLiteBuiltinGather);
529 case kTfLiteBuiltinGatherNd:
530 return VisitGatherOperator(delegateData,
531 tfLiteContext,
532 tfLiteNode,
533 nodeIndex,
534 kTfLiteBuiltinGatherNd);
535 case kTfLiteBuiltinGreater:
536 return VisitComparisonOperator(delegateData,
537 tfLiteContext,
538 tfLiteNode,
539 nodeIndex,
540 kTfLiteBuiltinGreater);
541 case kTfLiteBuiltinGreaterEqual:
542 return VisitComparisonOperator(delegateData,
543 tfLiteContext,
544 tfLiteNode,
545 nodeIndex,
546 kTfLiteBuiltinGreaterEqual);
547 case kTfLiteBuiltinHardSwish:
548 return VisitActivationOperator(delegateData,
549 tfLiteContext,
550 tfLiteNode,
551 nodeIndex,
552 kTfLiteBuiltinHardSwish);
553 case kTfLiteBuiltinL2Normalization:
554 return VisitNormalizationOperator(delegateData,
555 tfLiteContext,
556 tfLiteNode,
557 nodeIndex,
558 kTfLiteBuiltinL2Normalization);
559 case kTfLiteBuiltinL2Pool2d:
560 return VisitPoolingOperator(delegateData,
561 tfLiteContext,
562 tfLiteNode,
563 nodeIndex,
564 kTfLiteBuiltinL2Pool2d);
565 case kTfLiteBuiltinLess:
566 return VisitComparisonOperator(delegateData,
567 tfLiteContext,
568 tfLiteNode,
569 nodeIndex,
570 kTfLiteBuiltinLess);
571 case kTfLiteBuiltinLessEqual:
572 return VisitComparisonOperator(delegateData,
573 tfLiteContext,
574 tfLiteNode,
575 nodeIndex,
576 kTfLiteBuiltinLessEqual);
577 case kTfLiteBuiltinLocalResponseNormalization:
578 return VisitNormalizationOperator(delegateData,
579 tfLiteContext,
580 tfLiteNode,
581 nodeIndex,
582 kTfLiteBuiltinLocalResponseNormalization);
583 case kTfLiteBuiltinLogistic:
584 return VisitActivationOperator(delegateData,
585 tfLiteContext,
586 tfLiteNode,
587 nodeIndex,
588 kTfLiteBuiltinLogistic);
589 case kTfLiteBuiltinLogSoftmax:
590 return VisitSoftmaxOperator(delegateData,
591 tfLiteContext,
592 tfLiteNode,
593 nodeIndex,
594 kTfLiteBuiltinLogSoftmax);
595 case kTfLiteBuiltinLstm:
596 return VisitLstmOperator(delegateData,
597 tfLiteContext,
598 tfLiteNode,
599 nodeIndex,
600 kTfLiteBuiltinLstm);
601 case kTfLiteBuiltinMaxPool2d:
602 return VisitPoolingOperator(delegateData,
603 tfLiteContext,
604 tfLiteNode,
605 nodeIndex,
606 kTfLiteBuiltinMaxPool2d);
607 case kTfLiteBuiltinMaximum:
608 return VisitElementwiseBinaryOperator(delegateData,
609 tfLiteContext,
610 tfLiteNode,
611 nodeIndex,
612 kTfLiteBuiltinMaximum);
613 case kTfLiteBuiltinMean:
614 return VisitControlOperator(delegateData,
615 tfLiteContext,
616 tfLiteNode,
617 nodeIndex,
618 kTfLiteBuiltinMean);
619 case kTfLiteBuiltinMinimum:
620 return VisitElementwiseBinaryOperator(delegateData,
621 tfLiteContext,
622 tfLiteNode,
623 nodeIndex,
624 kTfLiteBuiltinMinimum);
625 case kTfLiteBuiltinMul:
626 return VisitElementwiseBinaryOperator(delegateData,
627 tfLiteContext,
628 tfLiteNode,
629 nodeIndex,
630 kTfLiteBuiltinMul);
631 case kTfLiteBuiltinNeg:
632 return VisitElementwiseUnaryOperator(delegateData,
633 tfLiteContext,
634 tfLiteNode,
635 nodeIndex,
636 armnn::UnaryOperation::Neg);
637 case kTfLiteBuiltinNotEqual:
638 return VisitComparisonOperator(delegateData,
639 tfLiteContext,
640 tfLiteNode,
641 nodeIndex,
642 kTfLiteBuiltinNotEqual);
643 case kTfLiteBuiltinPad:
644 return VisitPadOperator(delegateData,
645 tfLiteContext,
646 tfLiteNode,
647 nodeIndex,
648 kTfLiteBuiltinPad);
649 case kTfLiteBuiltinPadv2:
650 return VisitPadOperator(delegateData,
651 tfLiteContext,
652 tfLiteNode,
653 nodeIndex,
654 kTfLiteBuiltinPadv2);
655 case kTfLiteBuiltinPrelu:
656 return VisitActivationOperator(delegateData,
657 tfLiteContext,
658 tfLiteNode,
659 nodeIndex,
660 kTfLiteBuiltinPrelu);
661 case kTfLiteBuiltinQuantize:
662 return VisitQuantizeOperator(delegateData,
663 tfLiteContext,
664 tfLiteNode,
665 nodeIndex,
666 kTfLiteBuiltinQuantize);
667 case kTfLiteBuiltinRank:
668 return VisitControlOperator(delegateData,
669 tfLiteContext,
670 tfLiteNode,
671 nodeIndex,
672 kTfLiteBuiltinRank);
673 case kTfLiteBuiltinRelu:
674 return VisitActivationOperator(delegateData,
675 tfLiteContext,
676 tfLiteNode,
677 nodeIndex,
678 kTfLiteBuiltinRelu);
679 case kTfLiteBuiltinReluN1To1:
680 return VisitActivationOperator(delegateData,
681 tfLiteContext,
682 tfLiteNode,
683 nodeIndex,
684 kTfLiteBuiltinReluN1To1);
685 case kTfLiteBuiltinRelu6:
686 return VisitActivationOperator(delegateData,
687 tfLiteContext,
688 tfLiteNode,
689 nodeIndex,
690 kTfLiteBuiltinRelu6);
691 case kTfLiteBuiltinReshape:
692 return VisitReshapeOperator(delegateData,
693 tfLiteContext,
694 tfLiteNode,
695 nodeIndex,
696 kTfLiteBuiltinReshape);
697 case kTfLiteBuiltinResizeBilinear:
698 return VisitResizeOperator(delegateData,
699 tfLiteContext,
700 tfLiteNode,
701 nodeIndex,
702 kTfLiteBuiltinResizeBilinear);
703 case kTfLiteBuiltinResizeNearestNeighbor:
704 return VisitResizeOperator(delegateData,
705 tfLiteContext,
706 tfLiteNode,
707 nodeIndex,
708 kTfLiteBuiltinResizeNearestNeighbor);
709 case kTfLiteBuiltinRsqrt:
710 return VisitElementwiseUnaryOperator(delegateData,
711 tfLiteContext,
712 tfLiteNode,
713 nodeIndex,
714 armnn::UnaryOperation::Rsqrt);
715 case kTfLiteBuiltinSqrt:
716 return VisitElementwiseUnaryOperator(delegateData,
717 tfLiteContext,
718 tfLiteNode,
719 nodeIndex,
720 armnn::UnaryOperation::Sqrt);
721 case kTfLiteBuiltinSqueeze:
722 return VisitSqueezeOperator(delegateData,
723 tfLiteContext,
724 tfLiteNode,
725 nodeIndex,
726 kTfLiteBuiltinSqueeze);
727 case kTfLiteBuiltinStridedSlice:
728 return VisitSliceOperator(delegateData,
729 tfLiteContext,
730 tfLiteNode,
731 nodeIndex,
732 kTfLiteBuiltinStridedSlice);
733 case kTfLiteBuiltinTranspose:
734 return VisitTransposeOperator(delegateData,
735 tfLiteContext,
736 tfLiteNode,
737 nodeIndex,
738 kTfLiteBuiltinTranspose);
739 case kTfLiteBuiltinTransposeConv:
740 return VisitConvolutionOperator(delegateData,
741 tfLiteContext,
742 tfLiteNode,
743 nodeIndex,
744 kTfLiteBuiltinTransposeConv);
745 case kTfLiteBuiltinSoftmax:
746 return VisitSoftmaxOperator(delegateData,
747 tfLiteContext,
748 tfLiteNode,
749 nodeIndex,
750 kTfLiteBuiltinSoftmax);
751 case kTfLiteBuiltinSpaceToBatchNd:
752 return VisitSpaceToBatchNdOperator(delegateData,
753 tfLiteContext,
754 tfLiteNode,
755 nodeIndex,
756 kTfLiteBuiltinSpaceToBatchNd);
757 case kTfLiteBuiltinSpaceToDepth:
758 return VisitSpaceToDepthOperator(delegateData,
759 tfLiteContext,
760 tfLiteNode,
761 nodeIndex,
762 kTfLiteBuiltinSpaceToDepth);
763 case kTfLiteBuiltinSub:
764 return VisitElementwiseBinaryOperator(delegateData,
765 tfLiteContext,
766 tfLiteNode,
767 nodeIndex,
768 kTfLiteBuiltinSub);
769 case kTfLiteBuiltinTanh:
770 return VisitActivationOperator(delegateData,
771 tfLiteContext,
772 tfLiteNode,
773 nodeIndex,
774 kTfLiteBuiltinTanh);
775 default:
776 return kTfLiteError;
777 }
Sadik Armagan3c24f432020-10-19 17:35:30 +0100778}
779
780} // armnnDelegate namespace