//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>
#include <OpaqueDelegateUtils.hpp>

#include <Version.hpp>

#include "Activation.hpp"
#include "ArgMinMax.hpp"
#include "BatchMatMul.hpp"
#include "BatchSpace.hpp"
#include "Comparison.hpp"
#include "Convolution.hpp"
#include "Control.hpp"
#include "ElementwiseBinary.hpp"
#include "ElementwiseUnary.hpp"
#include "Fill.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
#include "GatherNd.hpp"
#include "LogicalBinary.hpp"
#include "Lstm.hpp"
#include "Normalization.hpp"
#include "Pack.hpp"
#include "Pad.hpp"
#include "Pooling.hpp"
#include "Prelu.hpp"
#include "Quantization.hpp"
#include "Redefine.hpp"
#include "Reduce.hpp"
#include "Resize.hpp"
#include "Round.hpp"
#include "Shape.hpp"
#include "Slice.hpp"
#include "StridedSlice.hpp"
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/Timer.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/logger.h>

#include <algorithm>
#include <iostream>
#include <sstream>

namespace armnnOpaqueDelegate
{

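// Stable delegate descriptor. TF Lite's stable delegate loader looks this symbol up to discover the
// Arm NN delegate by name, check the ABI version it was built against, and obtain the plugin
// entry points returned by GetArmnnDelegatePluginApi().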
const TfLiteStableDelegate TFL_TheStableDelegate =
{
    /*delegate_abi_version=*/ TFL_STABLE_DELEGATE_ABI_VERSION,
    /*delegate_name=*/        "armnn_delegate",
    /*delegate_version=*/     OPAQUE_DELEGATE_VERSION,
    /*delegate_plugin=*/      GetArmnnDelegatePluginApi()
};

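// Register the Arm NN delegate plugin with TF Lite's DelegatePluginRegistry under the stable
// delegate name, so tooling can construct the delegate from serialized TFLiteSettings. The
// Register object is intentionally not freed; the registration lives for the whole process.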
static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
        new tflite::delegates::DelegatePluginRegistry::Register(TFL_TheStableDelegate.delegate_name,
                                                                ArmnnDelegatePlugin::New);

ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
    : m_Options(std::move(options))
{
    // Configures logging for Arm NN
    if (m_Options.IsLoggingEnabled())
    {
        armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
    }
    // Create/Get the static Arm NN Runtime. Note that m_Runtime will be shared by all armnn_delegate
    // instances, so the RuntimeOptions cannot be altered for different armnn_delegate instances.
    m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
    std::vector<armnn::BackendId> backends;
    if (m_Runtime)
    {
        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
        for (auto& backend : m_Options.GetBackends())
        {
            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
            {
                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                                "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
            }
            else
            {
                backends.push_back(backend);
            }
        }
    }

    if (backends.empty())
    {
        // No known backend specified
        throw armnn::InvalidArgumentException("TfLiteArmnnOpaqueDelegate: No known backend specified.");
    }
    m_Options.SetBackends(backends);

    TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
}

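// Delegate preparation entry point, invoked by TF Lite when the delegate is applied to a graph.
// It identifies the operators Arm NN can handle and replaces each supported partition with a
// delegate kernel whose init/free/prepare/invoke callbacks wrap an ArmnnSubgraph.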
TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
    // We are required to have the void* data parameter in the function signature, but we don't actually use it.
    armnn::IgnoreUnused(data);

    TfLiteIntArray* supportedOperators =
            static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>
                    (TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
    if(supportedOperators == nullptr)
    {
        return kTfLiteError;
    }

    // ArmNN Opaque Delegate Registration
    TfLiteRegistrationExternal* kernelRegistration =
            TfLiteRegistrationExternalCreate(kTfLiteBuiltinDelegate,
                                             TFL_TheStableDelegate.delegate_name,
                                             /*version=*/OPAQUE_DELEGATE_MAJOR_VERSION);
    if(kernelRegistration == nullptr)
    {
        return kTfLiteError;
    }

    TfLiteRegistrationExternalSetInit(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, const char* buffer, size_t length) -> void*
        {
            armnn::IgnoreUnused(length);
            const TfLiteOpaqueDelegateParams* parameters =
                    reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
            if(parameters == nullptr)
            {
                TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                          "TfLiteArmnnOpaqueDelegate: Unable to get parameters.");
                return nullptr;
            }

            return static_cast<void*>(
                    ArmnnSubgraph::Create(tfLiteContext,
                                          parameters,
                                          static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
                                                  parameters->delegate->opaque_delegate_builder->data)));
        }
    );

    TfLiteRegistrationExternalSetFree(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, void* buffer) -> void
        {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        }
    );

    TfLiteRegistrationExternalSetPrepare(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(userData)->Prepare(tfLiteContext);
        }
    );

    TfLiteRegistrationExternalSetInvoke(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(userData)->Invoke(tfLiteContext, tfLiteNode);
        }
    );

    const TfLiteStatus status =
            TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
                    tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);

    TfLiteIntArrayFree(supportedOperators);
    return status;
}

TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings)
{
    // This method will always create an Opaque Delegate with default settings until
    // we have a DelegateOptions constructor that can parse the void* settings.
    armnn::IgnoreUnused(settings);
    auto options = TfLiteArmnnDelegateOptionsDefault();
    auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
    return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}

::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault()
{
    ::armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
    return options;
}

void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate)
{
    if (tfLiteDelegate != nullptr)
    {
        delete static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(TfLiteOpaqueDelegateGetData(tfLiteDelegate));
        TfLiteOpaqueDelegateDelete(tfLiteDelegate);
    }
}

const TfLiteOpaqueDelegatePlugin* GetArmnnDelegatePluginApi()
{
    static constexpr TfLiteOpaqueDelegatePlugin armnnPlugin{
        TfLiteArmnnOpaqueDelegateCreate, TfLiteArmnnOpaqueDelegateDelete, TfLiteArmnnOpaqueDelegateErrno};
    return &armnnPlugin;
}

const std::string ArmnnOpaqueDelegate::GetVersion()
{
    return OPAQUE_DELEGATE_VERSION;
}

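// Walks the graph's execution plan, test-visits every node against a null network, and returns a
// sorted TfLiteIntArray of the node indices that Arm NN can delegate. The caller owns the array.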
TfLiteIntArray* ArmnnOpaqueDelegate::IdentifyOperatorsToDelegate(TfLiteOpaqueContext* tfLiteContext)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    // Delegate data with null network
    DelegateData delegateData(m_Options.GetBackends());

    TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
    if (nodesToDelegate == nullptr)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "TfLiteArmnnOpaqueDelegate: Unable to create int array from execution plan.");
        return nullptr;
    }
    nodesToDelegate->size = 0;

    std::set<int32_t> unsupportedOperators;

    for (int i = 0; i < executionPlan->size; ++i)
    {
        const int nodeIndex = executionPlan->data[i];

        // Check whether this TfLiteOpaqueNode can be delegated to Arm NN
        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;

        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                      "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node %d.",
                                      nodeIndex);
            continue;
        }

        TfLiteStatus visitStatus;
        try
        {
            visitStatus = ArmnnSubgraph::VisitNode(
                    delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
        }
        catch(std::exception& ex)
        {
            ARMNN_LOG(error) << "ArmNN Failed to visit node with error: " << ex.what();
            visitStatus = kTfLiteError;
        }

        if (visitStatus != kTfLiteOk)
        {
            // Node is not supported by Arm NN
            unsupportedOperators.insert(TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration));
            continue;
        }

        nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
    }

    for (std::set<int32_t>::iterator it = unsupportedOperators.begin(); it != unsupportedOperators.end(); ++it)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "Operator %s [%d] is not supported by armnn_opaque_delegate.",
                                  tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
                                  *it);
    }

    if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: There are unsupported operators in the model. ";
        exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
        exMessage << "This should only be disabled under test conditions.";
        throw armnn::Exception(exMessage.str());
    }
    if (nodesToDelegate->size == 0)
    {
        ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate." <<
                           " The model will be executed entirely by TfLite runtime.";
    }

    std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
    return nodesToDelegate;
}

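// Adds an Arm NN input layer for every non-constant input tensor of the subgraph and records its
// binding information so the tensors can be matched up again at Invoke time.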
TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          const TfLiteIntArray* inputs,
                                          std::vector<armnn::BindingPointInfo>& inputBindings)
{
    const size_t numInputs = static_cast<size_t>(inputs->size);
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const int32_t tensorId = inputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if(!tensor)
        {
            return kTfLiteError;
        }

        // Do not create bindings for constant inputs
        if (TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo)
        {
            continue;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
        outputSlot.SetTensorInfo(tensorInfo);

        // Store for creating connections
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;

        inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

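// Adds an Arm NN output layer for every output tensor of the subgraph, connects it to the output
// slot recorded while parsing the nodes, and records its binding information.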
TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
                                           TfLiteOpaqueContext* tfLiteContext,
                                           const TfLiteIntArray* outputs,
                                           std::vector<armnn::BindingPointInfo>& outputBindings)
{
    const size_t numOutputs = static_cast<size_t>(outputs->size);
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        const int32_t tensorId = outputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if(!IsValid(tensor))
        {
            return kTfLiteError;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] != nullptr);
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
        outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

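// Builds an ArmnnSubgraph for the partition handed to the delegate: translates the TfLite nodes
// into an armnn::INetwork, optimizes it for the selected backends, loads it into the shared
// runtime, and returns a subgraph object that owns the resulting network id and bindings.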
ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
                                     const TfLiteOpaqueDelegateParams* parameters,
                                     const ArmnnOpaqueDelegate* delegate)
{
    const auto startTime = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmnnSubgraph creation";

    TfLiteIntArray* executionPlan;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        return nullptr;
    }

    // Initialize DelegateData, which holds the network and the output slot information
    DelegateData delegateData(delegate->m_Options.GetBackends());

    // Build ArmNN Network
    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
    armnn::NetworkId networkId;
    delegateData.m_Network = armnn::INetwork::Create(networkOptions);

    delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(
            TfLiteOpaqueContextGetNumTensors(tfLiteContext), nullptr);

    std::vector<armnn::BindingPointInfo> inputBindings;
    std::vector<armnn::BindingPointInfo> outputBindings;

    // Add input layer
    if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
    }

    // Parse TfLite delegate nodes to ArmNN
    const auto parseStartTime = armnn::GetTimeNow();
    for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
    {
        const int nodeIndex = parameters->nodes_to_replace->data[i];

        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to get node registration: " +
                                   std::to_string(nodeIndex));
        }

        if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to parse node: " + std::to_string(nodeIndex));
        }
    }
    ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";

    // Add output layer
    if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
    }

    // Optimize ArmNN network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    try
    {
        const auto optimizeStartTime = armnn::GetTimeNow();
        optNet = armnn::Optimize(*(delegateData.m_Network.get()),
                                 delegate->m_Options.GetBackends(),
                                 delegate->m_Runtime->GetDeviceSpec(),
                                 delegate->m_Options.GetOptimizerOptions());
        ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from optimize.";
        throw armnn::Exception(exMessage.str());
    }
    if (!optNet)
    {
        // Optimize failed
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to optimize the network!");
    }

    // If set, we will serialize the optimized model into a dot file.
    const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
    if (!serializeToDotFile.empty())
    {
        ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
        fs::path filename = serializeToDotFile;
        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet->SerializeToDot(file);
    }

    try
    {
        const auto loadStartTime = armnn::GetTimeNow();

        // Load graph into runtime
        std::string errorMessage;
        armnn::Status loadingStatus;
        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
        // There's a bit of an assumption here that the delegate will only support Malloc memory source.
        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
        {
            inputSource = armnn::MemorySource::Malloc;
        }
        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
        {
            outputSource = armnn::MemorySource::Malloc;
        }
        armnn::INetworkProperties networkProperties(false,
                                                    inputSource,
                                                    outputSource,
                                                    delegate->m_Options.GetInternalProfilingState(),
                                                    delegate->m_Options.GetInternalProfilingDetail());
        loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
                                                         std::move(optNet),
                                                         errorMessage,
                                                         networkProperties);
        if (loadingStatus != armnn::Status::Success)
        {
            // Network load failed.
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Network could not be loaded: " + errorMessage);
        }

        ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
        throw armnn::Exception(exMessage.str());
    }

    // Register debug callback function
    if (delegate->m_Options.GetDebugCallbackFunction().has_value())
    {
        delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
    }

    ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";

    // Create a new SubGraph with networkId and runtime
    return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
}

TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
{
    armnn::IgnoreUnused(tfLiteContext);
    return kTfLiteOk;
}

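// Executes the loaded Arm NN network for this subgraph: binds the node's non-constant input
// tensors and its output tensors to the recorded binding points, then calls EnqueueWorkload.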
TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices. TfLiteOpaqueNodeInputs sets inputIndexArray to point at an
    // int array of tensor indices, one for each input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
    }
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
    {
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);

        if(!IsValid(tensor))
        {
            return kTfLiteError;
        }
        // If tensor is not read only
        if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            armnn::TensorInfo inputTensorInfo = inputBinding.second;
            inputTensorInfo.SetConstant(true);
            const armnn::ConstTensor inputTensor(inputTensorInfo, TfLiteOpaqueTensorData(tensor));
            inputTensors.emplace_back(inputIdx, inputTensor);

            ++inputIndex;
        }
    }

    // Get the array of output indices. TfLiteOpaqueNodeOutputs sets outputIndexArray to point at an
    // int array of tensor indices, one for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
    }
    // Assign the tensors from the outputIndexArray to the armnn BindingPointInfo
    armnn::OutputTensors outputTensors;
    for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
    {
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
        if(!IsValid(tensor))
        {
            return kTfLiteError;
        }

        const armnn::Tensor outputTensor(outputBinding.second, TfLiteOpaqueTensorData(tensor));
        outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
    }

    // Run graph
    auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
    // The delegate holds its own Arm NN runtime so this is our last chance to print internal profiling data.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
    if (profiler && profiler->IsProfilingEnabled())
    {
        profiler->Print(std::cout);
    }
    return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
}

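// Dispatches a single TfLite node to the matching Arm NN visitor based on its builtin operator
// code (or custom name for custom operators). When delegateData holds a network the visitor adds
// the corresponding layer; with a null network it only checks whether the operator is supported.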
TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteRegistrationExternal* tfLiteRegistration,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex)
{
    switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
    {
        case kTfLiteBuiltinAbs:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinAbs,
                                                 armnn::UnaryOperation::Abs);
        case kTfLiteBuiltinAdd:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinAdd);
        case kTfLiteBuiltinArgMax:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMax);
        case kTfLiteBuiltinArgMin:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMin);
        case kTfLiteBuiltinAveragePool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinAveragePool2d);
        case kTfLiteBuiltinBatchMatmul:
            return VisitBatchMatMulOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinBatchMatmul);
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinCast:
            return VisitCastOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinCast);
        case kTfLiteBuiltinCeil:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinCeil,
                                                 armnn::UnaryOperation::Ceil);
        case kTfLiteBuiltinConcatenation:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinConcatenation);
        case kTfLiteBuiltinConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv2d);
        case kTfLiteBuiltinConv3d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv3d);
        case kTfLiteBuiltinCustom:
        {
            // Custom operators are defined by the name rather than the builtin code.
            // Parse the custom_name param in the registration to point to the correct visitor function.
            std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
            if (customOperatorName == "AveragePool3D")
            {
                return VisitPooling3dOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              customOperatorName);
            }
            else if (customOperatorName == "MaxPool3D")
            {
                return VisitPooling3dOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              customOperatorName);
            }
            // Invalid or unsupported custom operator
            return kTfLiteError;
        }
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinDepthwiseConv2d);
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinDiv);
        case kTfLiteBuiltinEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinEqual,
                                           armnn::ComparisonOperation::Equal);
        case kTfLiteBuiltinDepthToSpace:
            return VisitDepthToSpaceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinDepthToSpace);
        case kTfLiteBuiltinElu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinElu);
        case kTfLiteBuiltinExp:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinExp,
                                                 armnn::UnaryOperation::Exp);
        case kTfLiteBuiltinExpandDims:
            return VisitExpandDimsOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinExpandDims);
        case kTfLiteBuiltinFloor:
            return VisitFloorOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinFloor);
        case kTfLiteBuiltinFloorDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinFloorDiv);
        case kTfLiteBuiltinFullyConnected:
            return VisitFullyConnectedOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinFullyConnected);
        case kTfLiteBuiltinGather:
            return VisitGatherOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinGather);
        case kTfLiteBuiltinGatherNd:
            return VisitGatherNdOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinGatherNd);
        case kTfLiteBuiltinGreater:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreater,
                                           armnn::ComparisonOperation::Greater);
        case kTfLiteBuiltinGreaterEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreaterEqual,
                                           armnn::ComparisonOperation::GreaterOrEqual);
        case kTfLiteBuiltinHardSwish:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinHardSwish);
        case kTfLiteBuiltinL2Normalization:
            return VisitL2NormalizationOperator(delegateData,
                                                tfLiteContext,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinL2Normalization);
        case kTfLiteBuiltinL2Pool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinL2Pool2d);
        case kTfLiteBuiltinLess:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLess,
                                           armnn::ComparisonOperation::Less);
        case kTfLiteBuiltinLessEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLessEqual,
                                           armnn::ComparisonOperation::LessOrEqual);
        case kTfLiteBuiltinLogistic:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLogistic);
        case kTfLiteBuiltinLocalResponseNormalization:
            return VisitLocalResponseNormalizationOperator(delegateData,
                                                           tfLiteContext,
                                                           tfLiteNode,
                                                           nodeIndex,
                                                           kTfLiteBuiltinLocalResponseNormalization);
        case kTfLiteBuiltinLog:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinLog,
                                                 armnn::UnaryOperation::Log);
        case kTfLiteBuiltinLogicalAnd:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalAnd,
                                              armnn::LogicalBinaryOperation::LogicalAnd);
        case kTfLiteBuiltinLogicalNot:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinLogicalNot,
                                                 armnn::UnaryOperation::LogicalNot);
        case kTfLiteBuiltinLogicalOr:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalOr,
                                              armnn::LogicalBinaryOperation::LogicalOr);
        case kTfLiteBuiltinLogSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinLogSoftmax);
        case kTfLiteBuiltinLstm:
            return VisitLstmOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinLstm);
        case kTfLiteBuiltinMaxPool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinMaxPool2d);
        case kTfLiteBuiltinMaximum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMaximum);
        case kTfLiteBuiltinMean:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinMean);
        case kTfLiteBuiltinMinimum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMinimum);
        case kTfLiteBuiltinMul:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMul);
        case kTfLiteBuiltinNeg:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinNeg,
                                                 armnn::UnaryOperation::Neg);
        case kTfLiteBuiltinNotEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinNotEqual,
                                           armnn::ComparisonOperation::NotEqual);
        case kTfLiteBuiltinPack:
            return VisitPackOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinPack);
        case kTfLiteBuiltinPad:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPad);
        case kTfLiteBuiltinPadv2:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPadv2);
        case kTfLiteBuiltinPrelu:
            return VisitPreluOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinPrelu);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinQuantize);
        case kTfLiteBuiltinReduceMax:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceMax);
        case kTfLiteBuiltinReduceMin:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceMin);
        case kTfLiteBuiltinReduceProd:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceProd);
        case kTfLiteBuiltinRelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu);
        case kTfLiteBuiltinReluN1To1:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinReluN1To1);
        case kTfLiteBuiltinRelu6:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu6);
        case kTfLiteBuiltinReshape:
            return VisitReshapeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinReshape);
        case kTfLiteBuiltinResizeNearestNeighbor:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeNearestNeighbor);
        case kTfLiteBuiltinResizeBilinear:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeBilinear);
        case kTfLiteBuiltinRsqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinRsqrt,
                                                 armnn::UnaryOperation::Rsqrt);
        case kTfLiteBuiltinShape:
            return VisitShapeOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinShape);
        case kTfLiteBuiltinSin:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinSin,
                                                 armnn::UnaryOperation::Sin);
        case kTfLiteBuiltinSlice:
            return VisitSliceOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSlice);
        case kTfLiteBuiltinSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSoftmax);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinSpaceToBatchNd);
        case kTfLiteBuiltinSpaceToDepth:
            return VisitSpaceToDepthOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinSpaceToDepth);
        case kTfLiteBuiltinSplit:
            return VisitSplitOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSplit);
        case kTfLiteBuiltinSplitV:
            return VisitSplitVOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSplitV);
        case kTfLiteBuiltinSub:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinSub);
        case kTfLiteBuiltinSqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinSqrt,
                                                 armnn::UnaryOperation::Sqrt);
        case kTfLiteBuiltinSqueeze:
            return VisitSqueezeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSqueeze);
        case kTfLiteBuiltinStridedSlice:
            return VisitStridedSliceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinStridedSlice);
        case kTfLiteBuiltinSum:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSum);
        case kTfLiteBuiltinTanh:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinTanh);
        case kTfLiteBuiltinTranspose:
            return VisitTransposeOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinTranspose);
        case kTfLiteBuiltinTransposeConv:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinTransposeConv);
        case kTfLiteBuiltinUnidirectionalSequenceLstm:
            return VisitUnidirectionalSequenceLstmOperator(delegateData,
                                                           tfLiteContext,
                                                           tfLiteNode,
                                                           nodeIndex,
                                                           kTfLiteBuiltinUnidirectionalSequenceLstm);
        case kTfLiteBuiltinUnpack:
            return VisitUnpackOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinUnpack);
        default:
            return kTfLiteError;
    }
}
} // namespace armnnOpaqueDelegate