//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>

#include "Activation.hpp"
#include "ArgMinMax.hpp"
#include "BatchSpace.hpp"
#include "Comparison.hpp"
#include "Convolution.hpp"
#include "Control.hpp"
#include "ElementwiseBinary.hpp"
#include "ElementwiseUnary.hpp"
#include "Fill.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
#include "LogicalBinary.hpp"
#include "Lstm.hpp"
#include "Normalization.hpp"
#include "Pad.hpp"
#include "Pooling.hpp"
#include "Quantization.hpp"
#include "Redefine.hpp"
#include "Resize.hpp"
#include "Round.hpp"
#include "Slice.hpp"
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Transpose.hpp"

// Explicit includes for ArmNN utilities used directly in this file
// (ARMNN_ASSERT and armnn::IgnoreUnused).
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>

#include <algorithm>
#include <sstream>

namespace armnnDelegate
{

DelegateOptions TfLiteArmnnDelegateOptionsDefault()
{
    DelegateOptions options(armnn::Compute::CpuRef);
    return options;
}

TfLiteDelegate* TfLiteArmnnDelegateCreate(armnnDelegate::DelegateOptions options)
{
    auto* armnnDelegate = new ::armnnDelegate::Delegate(options);
    return armnnDelegate->GetDelegate();
}

void TfLiteArmnnDelegateDelete(TfLiteDelegate* tfLiteDelegate)
{
    if (tfLiteDelegate != nullptr)
    {
        delete static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_);
    }
}

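// Example usage (a sketch, not part of the delegate itself): an application would
// typically build DelegateOptions, create the delegate, hand it to a TfLite
// interpreter and free it only after the interpreter has been destroyed. Assumes
// the standard tflite::Interpreter API; error handling is omitted for brevity.
//
//     armnnDelegate::DelegateOptions delegateOptions(armnn::Compute::CpuAcc);
//     TfLiteDelegate* armnnTfLiteDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
//     interpreter->ModifyGraphWithDelegate(armnnTfLiteDelegate);
//     ...
//     armnnDelegate::TfLiteArmnnDelegateDelete(armnnTfLiteDelegate);

// DoPrepare identifies the TfLite nodes that ArmNN can handle and asks the TfLite
// runtime to replace them with the ArmnnSubgraph kernel registered below.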
TfLiteStatus DoPrepare(TfLiteContext* tfLiteContext, TfLiteDelegate* tfLiteDelegate)
{
    TfLiteIntArray* supportedOperators =
        static_cast<::armnnDelegate::Delegate*>(tfLiteDelegate->data_)->IdentifyOperatorsToDelegate(tfLiteContext);

    // ArmNN Delegate Registration
    static const TfLiteRegistration kArmnnSubgraphRegistration = {
        // ArmnnSubgraph Init
        .init = [](TfLiteContext* tfLiteContext, const char* buffer, size_t length) -> void* {
            armnn::IgnoreUnused(length);
            const TfLiteDelegateParams* parameters = reinterpret_cast<const TfLiteDelegateParams*>(buffer);

            return static_cast<void*>(ArmnnSubgraph::Create(
                tfLiteContext, parameters, static_cast<::armnnDelegate::Delegate*>(parameters->delegate->data_)));
        },
        // ArmnnSubgraph Free
        .free = [](TfLiteContext* tfLiteContext, void* buffer) -> void {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        },
        // ArmnnSubgraph Prepare
        .prepare = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
            if (tfLiteNode->user_data == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Prepare(tfLiteContext);
        },
        // ArmnnSubgraph Invoke
        .invoke = [](TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode) -> TfLiteStatus {
            if (tfLiteNode->user_data == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(tfLiteNode->user_data)->Invoke(tfLiteContext, tfLiteNode);
        },

        .profiling_string = nullptr,
        .builtin_code = kTfLiteBuiltinDelegate,
        .custom_name = "TfLiteArmNnDelegate",
        .version = 1,
    };

    const TfLiteStatus status =
        tfLiteContext->ReplaceNodeSubsetsWithDelegateKernels(
            tfLiteContext, kArmnnSubgraphRegistration, supportedOperators, tfLiteDelegate);

    TfLiteIntArrayFree(supportedOperators);
    return status;
}

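// Delegate constructor: creates the ArmNN runtime (forwarding any backend options),
// filters the requested backends against those the runtime actually supports, and
// throws if none of the requested backends are available.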
Delegate::Delegate(armnnDelegate::DelegateOptions options)
    : m_Runtime(nullptr, nullptr),
      m_Options(std::move(options))
{
    // Create ArmNN Runtime
    armnn::IRuntime::CreationOptions runtimeOptions;

    auto backendOptions = m_Options.GetBackendOptions();
    if (!backendOptions.empty())
    {
        runtimeOptions.m_BackendOptions = backendOptions;
    }
    m_Runtime = armnn::IRuntime::Create(runtimeOptions);

    std::vector<armnn::BackendId> backends;
    if (m_Runtime)
    {
        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
        for (auto& backend : m_Options.GetBackends())
        {
            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
            {
                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                                "TfLiteArmnnDelegate: Requested unknown backend %s", backend.Get().c_str());
            }
            else
            {
                backends.push_back(backend);
            }
        }
    }

    if (backends.empty())
    {
        // No known backend specified
        throw armnn::InvalidArgumentException("TfLiteArmnnDelegate: No known backend specified.");
    }
    m_Options.SetBackends(backends);

    TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnDelegate: Created TfLite ArmNN delegate.");
}

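// Walks the TfLite execution plan and returns the indices of the nodes that ArmNN
// can delegate, as a sorted TfLiteIntArray. The caller owns the returned array and
// must release it with TfLiteIntArrayFree.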
TfLiteIntArray* Delegate::IdentifyOperatorsToDelegate(TfLiteContext* tfLiteContext)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    // Delegate data with null network
    DelegateData delegateData(m_Options.GetBackends());

    TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
    nodesToDelegate->size = 0;
    for (int i = 0; i < executionPlan->size; ++i)
    {
        const int nodeIndex = executionPlan->data[i];

        // If TfLite nodes can be delegated to ArmNN
        TfLiteNode* tfLiteNode = nullptr;
        TfLiteRegistration* tfLiteRegistration = nullptr;
        if (tfLiteContext->GetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_KERNEL_LOG(tfLiteContext,
                               "TfLiteArmnnDelegate: Unable to get node and registration for node %d.",
                               nodeIndex);
            continue;
        }

        if (ArmnnSubgraph::VisitNode(
                delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            // node is not supported by ArmNN
            continue;
        }

        nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
    }

    std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
    return nodesToDelegate;
}

TfLiteDelegate* Delegate::GetDelegate()
{
    return &m_Delegate;
}

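// Adds an ArmNN input layer for every non-constant input tensor of the delegated
// subgraph, records the layer's output slot so later layers can connect to it, and
// collects the binding information needed at Invoke time.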
TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          const TfLiteIntArray* inputs,
                                          std::vector<armnn::BindingPointInfo>& inputBindings)
{
    const size_t numInputs = static_cast<size_t>(inputs->size);
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const int32_t tensorId = inputs->data[i];
        const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];
        // Do not create bindings for constant inputs
        if (tensor.allocation_type == kTfLiteMmapRo)
        {
            continue;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
        outputSlot.SetTensorInfo(tensorInfo);

        // Store for creating connections
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;

        inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

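// Adds an ArmNN output layer for every output tensor of the delegated subgraph,
// connects it to the output slot of the layer that produces that tensor, and
// collects the binding information needed at Invoke time.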
TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
                                           TfLiteContext* tfLiteContext,
                                           const TfLiteIntArray* outputs,
                                           std::vector<armnn::BindingPointInfo>& outputBindings)
{
    const size_t numOutputs = static_cast<size_t>(outputs->size);
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        const int32_t tensorId = outputs->data[i];
        const TfLiteTensor tensor = tfLiteContext->tensors[tensorId];

        auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteTensor(tensor);
        ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] != nullptr);
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
        outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

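// Builds an ArmnnSubgraph for the set of nodes TfLite handed to the delegate:
// translates the nodes into an armnn::INetwork, optimizes that network for the
// selected backends and loads it into the runtime. Throws armnn::Exception if any
// of these steps fails.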
ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteContext* tfLiteContext,
                                     const TfLiteDelegateParams* parameters,
                                     const Delegate* delegate)
{
    TfLiteIntArray* executionPlan;
    if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        return nullptr;
    }

    // Initialize DelegateData, which holds the network and the output slot information
    DelegateData delegateData(delegate->m_Options.GetBackends());

    // Build ArmNN Network
    armnn::NetworkOptions networkOptions = {};
    armnn::NetworkId networkId;
    delegateData.m_Network = armnn::INetwork::Create(networkOptions);

    delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(tfLiteContext->tensors_size, nullptr);

    std::vector<armnn::BindingPointInfo> inputBindings;
    std::vector<armnn::BindingPointInfo> outputBindings;

    // Add input layer
    auto status = AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Inputs to the network!");
    }

    // Parse TfLite delegate nodes to ArmNN
    for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
    {
        const int nodeIndex = parameters->nodes_to_replace->data[i];

        TfLiteNode* tfLiteNode = nullptr;
        TfLiteRegistration* tfLiteRegistration = nullptr;
        if (tfLiteContext->GetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            std::stringstream exMessage;
            exMessage << "TfLiteArmnnDelegate: Unable to get node registration: " << nodeIndex;
            throw armnn::Exception(exMessage.str());
        }

        if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            std::stringstream exMessage;
            exMessage << "TfLiteArmnnDelegate: Unable to parse node: " << nodeIndex;
            throw armnn::Exception(exMessage.str());
        }
    }

    // Add Output layer
    status = AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Unable to add Outputs to the network!");
    }

    // Optimize ArmNN network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    try
    {
        optNet = armnn::Optimize(*(delegateData.m_Network.get()),
                                 delegate->m_Options.GetBackends(),
                                 delegate->m_Runtime->GetDeviceSpec());
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from optimize.";
        throw armnn::Exception(exMessage.str());
    }
    if (!optNet)
    {
        // Optimize failed
        throw armnn::Exception("TfLiteArmnnDelegate: Unable to optimize the network!");
    }

    try
    {
        // Load graph into runtime
        auto loadingStatus = delegate->m_Runtime->LoadNetwork(networkId, std::move(optNet));
        if (loadingStatus != armnn::Status::Success)
        {
            // Loading the network failed
            throw armnn::Exception("TfLiteArmnnDelegate: Network could not be loaded!");
        }
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
        throw armnn::Exception(exMessage.str());
    }

    // Create a new SubGraph with networkId and runtime
    return new ArmnnSubgraph(networkId, delegate->m_Runtime.get(), inputBindings, outputBindings);
}

TfLiteStatus ArmnnSubgraph::Prepare(TfLiteContext* tfLiteContext)
{
    armnn::IgnoreUnused(tfLiteContext);
    return kTfLiteOk;
}

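// Runs the loaded ArmNN network for one TfLite invocation: binds the TfLite tensor
// buffers to the input/output bindings recorded at build time (skipping constant
// inputs) and enqueues the workload on the ArmNN runtime.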
TfLiteStatus ArmnnSubgraph::Invoke(TfLiteContext* tfLiteContext, TfLiteNode* tfLiteNode)
{
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (auto inputIdx : tflite::TfLiteIntArrayView(tfLiteNode->inputs))
    {
        TfLiteTensor* tensor = &tfLiteContext->tensors[inputIdx];
        if (tensor->allocation_type != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            const armnn::ConstTensor inputTensor(inputBinding.second, tensor->data.data);
            inputTensors.emplace_back(inputIdx, inputTensor);

            ++inputIndex;
        }
    }

    // Prepare outputs
    armnn::OutputTensors outputTensors;
    size_t outputIndex = 0;
    for (auto outputIdx : tflite::TfLiteIntArrayView(tfLiteNode->outputs))
    {
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIndex];
        TfLiteTensor* tensor = &tfLiteContext->tensors[outputIdx];
        const armnn::Tensor outputTensor(outputBinding.second, tensor->data.data);
        outputTensors.emplace_back(outputIdx, outputTensor);

        ++outputIndex;
    }

    // Run graph
    auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
    return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
}

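// Dispatches a single TfLite node to the matching Visit*Operator translation
// function based on its builtin code. Returns kTfLiteError for operators the
// delegate does not support, which leaves those nodes on the default TfLite runtime.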
TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteRegistration* tfLiteRegistration,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex)
{
    switch (tfLiteRegistration->builtin_code)
    {
        case kTfLiteBuiltinAbs:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 armnn::UnaryOperation::Abs);
        case kTfLiteBuiltinAdd:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinAdd);
        case kTfLiteBuiltinArgMax:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMax);
        case kTfLiteBuiltinArgMin:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMin);
        case kTfLiteBuiltinAveragePool2d:
            return VisitPoolingOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinAveragePool2d);
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinConcatenation:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinConcatenation);
        case kTfLiteBuiltinConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv2d);
        case kTfLiteBuiltinDepthToSpace:
            return VisitDepthToSpaceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinDepthToSpace);
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinDepthwiseConv2d);
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinDiv);
        case kTfLiteBuiltinElu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinElu);
        case kTfLiteBuiltinEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinEqual);
        case kTfLiteBuiltinExp:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 armnn::UnaryOperation::Exp);
        case kTfLiteBuiltinExpandDims:
            return VisitExpandDimsOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinExpandDims);
        case kTfLiteBuiltinFill:
            return VisitFillOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinFill);
        case kTfLiteBuiltinFloor:
            return VisitFloorOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinFloor);
        case kTfLiteBuiltinFullyConnected:
            return VisitFullyConnectedOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinFullyConnected);
        case kTfLiteBuiltinGather:
            return VisitGatherOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinGather);
        case kTfLiteBuiltinGatherNd:
            return VisitGatherOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinGatherNd);
        case kTfLiteBuiltinGreater:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreater);
        case kTfLiteBuiltinGreaterEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreaterEqual);
        case kTfLiteBuiltinHardSwish:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinHardSwish);
        case kTfLiteBuiltinL2Normalization:
            return VisitNormalizationOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinL2Normalization);
        case kTfLiteBuiltinL2Pool2d:
            return VisitPoolingOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinL2Pool2d);
        case kTfLiteBuiltinLess:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLess);
        case kTfLiteBuiltinLessEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLessEqual);
        case kTfLiteBuiltinLocalResponseNormalization:
            return VisitNormalizationOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLocalResponseNormalization);
        case kTfLiteBuiltinLogicalAnd:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalAnd,
                                              armnn::LogicalBinaryOperation::LogicalAnd);
        case kTfLiteBuiltinLogicalNot:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 armnn::UnaryOperation::LogicalNot);
        case kTfLiteBuiltinLogicalOr:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalOr,
                                              armnn::LogicalBinaryOperation::LogicalOr);
        case kTfLiteBuiltinLogistic:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLogistic);
        case kTfLiteBuiltinLogSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinLogSoftmax);
        case kTfLiteBuiltinLstm:
            return VisitLstmOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinLstm);
        case kTfLiteBuiltinMaxPool2d:
            return VisitPoolingOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinMaxPool2d);
        case kTfLiteBuiltinMaximum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMaximum);
        case kTfLiteBuiltinMean:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinMean);
        case kTfLiteBuiltinMinimum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMinimum);
        case kTfLiteBuiltinMul:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMul);
        case kTfLiteBuiltinNeg:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 armnn::UnaryOperation::Neg);
        case kTfLiteBuiltinNotEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinNotEqual);
        case kTfLiteBuiltinPad:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPad);
        case kTfLiteBuiltinPadv2:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPadv2);
        case kTfLiteBuiltinPrelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinPrelu);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinQuantize);
        case kTfLiteBuiltinRank:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinRank);
        case kTfLiteBuiltinRelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu);
        case kTfLiteBuiltinReluN1To1:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinReluN1To1);
        case kTfLiteBuiltinRelu6:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu6);
        case kTfLiteBuiltinReshape:
            return VisitReshapeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinReshape);
        case kTfLiteBuiltinResizeBilinear:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeBilinear);
        case kTfLiteBuiltinResizeNearestNeighbor:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeNearestNeighbor);
        case kTfLiteBuiltinRsqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 armnn::UnaryOperation::Rsqrt);
        case kTfLiteBuiltinSplit:
            return VisitSplitOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSplit);
        case kTfLiteBuiltinSplitV:
            return VisitSplitVOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSplitV);
        case kTfLiteBuiltinSqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 armnn::UnaryOperation::Sqrt);
        case kTfLiteBuiltinSqueeze:
            return VisitSqueezeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSqueeze);
        case kTfLiteBuiltinStridedSlice:
            return VisitSliceOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinStridedSlice);
        case kTfLiteBuiltinTranspose:
            return VisitTransposeOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinTranspose);
        case kTfLiteBuiltinTransposeConv:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinTransposeConv);
        case kTfLiteBuiltinSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSoftmax);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinSpaceToBatchNd);
        case kTfLiteBuiltinSpaceToDepth:
            return VisitSpaceToDepthOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinSpaceToDepth);
        case kTfLiteBuiltinSub:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinSub);
        case kTfLiteBuiltinTanh:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinTanh);
        default:
            return kTfLiteError;
    }
}

} // armnnDelegate namespace