//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>
#include <OpaqueDelegateUtils.hpp>

#include "Activation.hpp"
#include "ArgMinMax.hpp"
#include "BatchMatMul.hpp"
#include "BatchSpace.hpp"
#include "Comparison.hpp"
#include "Convolution.hpp"
#include "Control.hpp"
#include "ElementwiseBinary.hpp"
#include "ElementwiseUnary.hpp"
#include "Fill.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
#include "GatherNd.hpp"
#include "LogicalBinary.hpp"
#include "Lstm.hpp"
#include "Normalization.hpp"
#include "Pack.hpp"
#include "Pad.hpp"
#include "Pooling.hpp"
#include "Prelu.hpp"
#include "Quantization.hpp"
#include "Redefine.hpp"
#include "Reduce.hpp"
#include "Resize.hpp"
#include "ReverseV2.hpp"
#include "Round.hpp"
#include "Shape.hpp"
#include "Slice.hpp"
#include "StridedSlice.hpp"
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/Timer.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <iostream>
#include <sstream>

namespace armnnOpaqueDelegate
{

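// Registering with TfLite's DelegatePluginRegistry makes this delegate discoverable by name
// ("armnn_delegate") so it can be instantiated through the delegate plugin mechanism at runtime.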
static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
        new tflite::delegates::DelegatePluginRegistry::Register("armnn_delegate",
                                                                ArmnnDelegatePlugin::New);

ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
    : m_Options(std::move(options))
{
    // Configures logging for ARMNN
    if (m_Options.IsLoggingEnabled())
    {
        armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
    }
    // Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all armnn_delegate
    // instances so the RuntimeOptions cannot be altered for different armnn_delegate instances.
    m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
    std::vector<armnn::BackendId> backends;
    if (m_Runtime)
    {
        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
        for (auto& backend : m_Options.GetBackends())
        {
            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
            {
                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                                "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
            }
            else
            {
                backends.push_back(backend);
            }
        }
    }

    if (backends.empty())
    {
        // No known backend specified
        throw armnn::InvalidArgumentException("TfLiteArmnnOpaqueDelegate: No known backend specified.");
    }
    m_Options.SetBackends(backends);

    TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
}

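// Delegate preparation callback invoked by TfLite when the delegate is applied to a model. It asks the
// ArmnnOpaqueDelegate which nodes it can handle and replaces those node subsets with ArmnnSubgraph kernels.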
TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
    // We are required to have the void* data parameter in the function signature, but we don't actually use it.
    armnn::IgnoreUnused(data);

    TfLiteIntArray* supportedOperators =
            static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>
                    (TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
    if(supportedOperators == nullptr)
    {
        return kTfLiteError;
    }

    // ArmNN Opaque Delegate Registration
    TfLiteRegistrationExternal* kernelRegistration =
            TfLiteRegistrationExternalCreate(kTfLiteBuiltinDelegate,
                                             "armnn_delegate",
                                             /*version=*/OPAQUE_DELEGATE_MAJOR_VERSION);
    if(kernelRegistration == nullptr)
    {
        return kTfLiteError;
    }

    TfLiteRegistrationExternalSetInit(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, const char* buffer, size_t length) -> void*
        {
            armnn::IgnoreUnused(length);
            const TfLiteOpaqueDelegateParams* parameters =
                    reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
            if(parameters == nullptr)
            {
                TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                          "TfLiteArmnnOpaqueDelegate: Unable to get parameters.");
                return nullptr;
            }

            return static_cast<void*>(
                ArmnnSubgraph::Create(tfLiteContext,
                                      parameters,
                                      static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
                                          parameters->delegate->opaque_delegate_builder->data)));
        }
    );

    TfLiteRegistrationExternalSetFree(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, void* buffer) -> void
        {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        }
    );

    TfLiteRegistrationExternalSetPrepare(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(userData)->Prepare(tfLiteContext);
        }
    );

    TfLiteRegistrationExternalSetInvoke(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(userData)->Invoke(tfLiteContext, tfLiteNode);
        }
    );

    const TfLiteStatus status =
            TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
                tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);

    TfLiteIntArrayFree(supportedOperators);
    return status;
}

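// Minimal usage sketch (an assumption about typical client code, not part of this file): an application
// would create the delegate, attach it to its TfLite interpreter options via the TfLite C API call that
// accepts an opaque delegate, run inference, and delete the delegate once the interpreter is destroyed.
//
//     TfLiteOpaqueDelegate* armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(nullptr);
//     // ... add the delegate to the interpreter options and run inference ...
//     armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete(armnnDelegate);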
TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings)
{
    // This method will always create an Opaque Delegate with default settings until
    // there is a DelegateOptions constructor that can parse the void* settings.
    armnn::IgnoreUnused(settings);
    auto options = TfLiteArmnnDelegateOptionsDefault();
    auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
    return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}

::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault()
{
    ::armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
    return options;
}

void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate)
{
    if (tfLiteDelegate != nullptr)
    {
        delete static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(TfLiteOpaqueDelegateGetData(tfLiteDelegate));
        TfLiteOpaqueDelegateDelete(tfLiteDelegate);
    }
}

const std::string ArmnnOpaqueDelegate::GetVersion()
{
    return OPAQUE_DELEGATE_VERSION;
}

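// Walks the graph execution plan and returns the indices of the nodes ArmNN can handle, using
// ArmnnSubgraph::VisitNode purely as a support check (the DelegateData here carries no network).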
TfLiteIntArray* ArmnnOpaqueDelegate::IdentifyOperatorsToDelegate(TfLiteOpaqueContext* tfLiteContext)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    // Delegate data with null network
    DelegateData delegateData(m_Options.GetBackends());

    TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
    if (nodesToDelegate == nullptr)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "TfLiteArmnnOpaqueDelegate: Unable to create int array from execution plan.");
        return nullptr;
    }
    nodesToDelegate->size = 0;

    std::set<int32_t> unsupportedOperators;

    for (int i = 0; i < executionPlan->size; ++i)
    {
        const int nodeIndex = executionPlan->data[i];

        // Check whether this TfLiteOpaqueNode can be delegated to ArmNN
        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;

        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                      "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node %d.",
                                      nodeIndex);
            continue;
        }

        TfLiteStatus visitStatus;
        try
        {
            visitStatus = ArmnnSubgraph::VisitNode(
                    delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
        }
        catch(std::exception& ex)
        {
            ARMNN_LOG(error) << "ArmNN Failed to visit node with error: " << ex.what();
            visitStatus = kTfLiteError;
        }

        if (visitStatus != kTfLiteOk)
        {
            // Node is not supported by ArmNN
            unsupportedOperators.insert(TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration));
            continue;
        }

        nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
    }

    for (std::set<int32_t>::iterator it = unsupportedOperators.begin(); it != unsupportedOperators.end(); ++it)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "Operator %s [%d] is not supported by armnn_opaque_delegate.",
                                  tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
                                  *it);
    }

    if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: There are unsupported operators in the model. ";
        exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
        exMessage << "This should only be disabled under test conditions.";
        throw armnn::Exception(exMessage.str());
    }
    if (nodesToDelegate->size == 0)
    {
        ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate." <<
                        " The model will be executed entirely by TfLite runtime.";
    }

    std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
    return nodesToDelegate;
}

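// Creates an ArmNN input layer for every non-constant input tensor of the subgraph, records its output
// slot for later connections and appends the (bindingId, TensorInfo) pair to inputBindings.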
TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          const TfLiteIntArray* inputs,
                                          std::vector<armnn::BindingPointInfo>& inputBindings)
{
    const size_t numInputs = static_cast<size_t>(inputs->size);
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const int32_t tensorId = inputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if(!tensor)
        {
            return kTfLiteError;
        }

        // Do not create bindings for constant inputs
        if (TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo)
        {
            continue;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
        outputSlot.SetTensorInfo(tensorInfo);

        // Store for creating connections
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;

        inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

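// Creates an ArmNN output layer for every output tensor of the subgraph, connects it to the slot that
// produces that tensor and appends the (bindingId, TensorInfo) pair to outputBindings.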
TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
                                           TfLiteOpaqueContext* tfLiteContext,
                                           const TfLiteIntArray* outputs,
                                           std::vector<armnn::BindingPointInfo>& outputBindings)
{
    const size_t numOutputs = static_cast<size_t>(outputs->size);
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        const int32_t tensorId = outputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if(!IsValid(tensor))
        {
            return kTfLiteError;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        ARMNN_ASSERT(delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] != nullptr);
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
        outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

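// Builds an ArmnnSubgraph from the node subset handed over by TfLite: it constructs an ArmNN network
// from the delegated nodes, optimizes it for the configured backends, optionally serializes it to a
// dot file, loads it into the shared runtime and returns the ready-to-run subgraph.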
ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
                                     const TfLiteOpaqueDelegateParams* parameters,
                                     const ArmnnOpaqueDelegate* delegate)
{
    const auto startTime = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmnnSubgraph creation";

    TfLiteIntArray* executionPlan;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        return nullptr;
    }

    // Initialize DelegateData, which holds the network being built and the output slot for each tensor
    DelegateData delegateData(delegate->m_Options.GetBackends());

    // Build ArmNN Network
    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
    armnn::NetworkId networkId;
    delegateData.m_Network = armnn::INetwork::Create(networkOptions);

    delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(
            TfLiteOpaqueContextGetNumTensors(tfLiteContext), nullptr);

    std::vector<armnn::BindingPointInfo> inputBindings;
    std::vector<armnn::BindingPointInfo> outputBindings;

    // Add input layer
    if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
    }

    // Parse TfLite delegate nodes to ArmNN
    const auto parseStartTime = armnn::GetTimeNow();
    for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
    {
        const int nodeIndex = parameters->nodes_to_replace->data[i];

        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to get node registration: " +
                                   std::to_string(nodeIndex));
        }

        if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to parse node: " + std::to_string(nodeIndex));
        }
    }
    ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";

    // Add Output layer
    if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
    }

    // Optimize ArmNN network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    try
    {
        const auto optimizeStartTime = armnn::GetTimeNow();
        optNet = armnn::Optimize(*(delegateData.m_Network.get()),
                                 delegate->m_Options.GetBackends(),
                                 delegate->m_Runtime->GetDeviceSpec(),
                                 delegate->m_Options.GetOptimizerOptions());
        ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from optimize.";
        throw armnn::Exception(exMessage.str());
    }
    if (!optNet)
    {
        // Optimize failed
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to optimize the network!");
    }

    // If set, we will serialize the optimized model into a dot file.
    const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
    if (!serializeToDotFile.empty())
    {
        ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
        fs::path filename = serializeToDotFile;
        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet->SerializeToDot(file);
    }

    try
    {
        const auto loadStartTime = armnn::GetTimeNow();

        // Load graph into runtime
        std::string errorMessage;
        armnn::Status loadingStatus;
        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
        // There's a bit of an assumption here that the delegate will only support Malloc memory source.
        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
        {
            inputSource = armnn::MemorySource::Malloc;
        }
        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
        {
            outputSource = armnn::MemorySource::Malloc;
        }
        armnn::INetworkProperties networkProperties(false,
                                                    inputSource,
                                                    outputSource,
                                                    delegate->m_Options.GetInternalProfilingState(),
                                                    delegate->m_Options.GetInternalProfilingDetail());
        loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
                                                         std::move(optNet),
                                                         errorMessage,
                                                         networkProperties);
        if (loadingStatus != armnn::Status::Success)
        {
            // Network load failed.
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Network could not be loaded: " + errorMessage);
        }

        ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
        throw armnn::Exception(exMessage.str());
    }

    // Register debug callback function
    if (delegate->m_Options.GetDebugCallbackFunction().has_value())
    {
        delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
    }

    ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";

    // Create a new SubGraph with networkId and runtime
    return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
}

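// Called by TfLite when the delegate kernel is prepared. Graph optimization and loading have already
// happened in ArmnnSubgraph::Create, so there is nothing left to do here.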
TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
{
    armnn::IgnoreUnused(tfLiteContext);
    return kTfLiteOk;
}

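// Executes the loaded ArmNN network: binds the node's non-constant input tensors and its output tensors
// to the recorded binding points and enqueues the workload on the shared ArmNN runtime.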
TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices from TfLiteOpaqueNodeInputs. inputIndexArray holds one index per
    // input slot of the node, each pointing at the corresponding tensor.
    const int* inputIndexArray;
    int numInputs;
    if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
    }
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
    {
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);

        if(!IsValid(tensor))
        {
            return kTfLiteError;
        }
        // Only bind tensors that are not read only; constant inputs were skipped when the bindings were created
        if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            armnn::TensorInfo inputTensorInfo = inputBinding.second;
            inputTensorInfo.SetConstant(true);
            const armnn::ConstTensor inputTensor(inputTensorInfo, TfLiteOpaqueTensorData(tensor));
            inputTensors.emplace_back(inputIndexArray[inputIdx], inputTensor);

            ++inputIndex;
        }
    }

    // Get the array of output indices from TfLiteOpaqueNodeOutputs. outputIndexArray holds one index per
    // output slot of the node, each pointing at the corresponding tensor.
    const int* outputIndexArray;
    int numOutputs;
    if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
    }
    // Assign the tensors from the outputIndexArray to the armnn BindingPointInfo
    armnn::OutputTensors outputTensors;
    for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
    {
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
        if(!IsValid(tensor))
        {
            return kTfLiteError;
        }

        const armnn::Tensor outputTensor(outputBinding.second, TfLiteOpaqueTensorData(tensor));
        outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
    }

    // Run graph
    auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
    // The delegate holds its own Arm NN runtime so this is our last chance to print internal profiling data.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
    if (profiler && profiler->IsProfilingEnabled())
    {
        profiler->Print(std::cout);
    }
    return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
}

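// Dispatches a single TfLite node to the matching Visit*Operator function based on its builtin code (or
// custom operator name). When delegateData holds a network, the visitor adds the equivalent ArmNN layer;
// otherwise it only checks whether the operator is supported by the selected backends.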
TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteRegistrationExternal* tfLiteRegistration,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex)
{
    switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
    {
        case kTfLiteBuiltinAbs:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinAbs,
                                                 armnn::UnaryOperation::Abs);
        case kTfLiteBuiltinAdd:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinAdd);
        case kTfLiteBuiltinArgMax:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMax);
        case kTfLiteBuiltinArgMin:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMin);
        case kTfLiteBuiltinAveragePool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinAveragePool2d);
        case kTfLiteBuiltinBatchMatmul:
            return VisitBatchMatMulOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinBatchMatmul);
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinCast:
            return VisitCastOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinCast);
        case kTfLiteBuiltinCeil:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinCeil,
                                                 armnn::UnaryOperation::Ceil);
        case kTfLiteBuiltinConcatenation:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinConcatenation);
        case kTfLiteBuiltinConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv2d);
        case kTfLiteBuiltinConv3d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv3d);
        case kTfLiteBuiltinCustom:
        {
            // Custom operators are defined by the name rather than the builtin code.
            // Parse the custom_name param in the registration to point to the correct visitor function.
            std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
            if (customOperatorName == "AveragePool3D")
            {
                return VisitPooling3dOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              customOperatorName);
            }
            else if (customOperatorName == "MaxPool3D")
            {
                return VisitPooling3dOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              customOperatorName);
            }
            // Invalid or unsupported custom operator
            return kTfLiteError;
        }
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinDepthwiseConv2d);
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinDiv);
        case kTfLiteBuiltinEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinEqual,
                                           armnn::ComparisonOperation::Equal);
        case kTfLiteBuiltinDepthToSpace:
            return VisitDepthToSpaceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinDepthToSpace);
        case kTfLiteBuiltinElu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinElu);
        case kTfLiteBuiltinExp:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinExp,
                                                 armnn::UnaryOperation::Exp);
        case kTfLiteBuiltinExpandDims:
            return VisitExpandDimsOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinExpandDims);
        case kTfLiteBuiltinFill:
            return VisitFillOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinFill);
        case kTfLiteBuiltinFloor:
            return VisitFloorOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinFloor);
        case kTfLiteBuiltinFloorDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinFloorDiv);
        case kTfLiteBuiltinFullyConnected:
            return VisitFullyConnectedOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinFullyConnected);
        case kTfLiteBuiltinGather:
            return VisitGatherOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinGather);
        case kTfLiteBuiltinGatherNd:
            return VisitGatherNdOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinGatherNd);
        case kTfLiteBuiltinGreater:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreater,
                                           armnn::ComparisonOperation::Greater);
        case kTfLiteBuiltinGreaterEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreaterEqual,
                                           armnn::ComparisonOperation::GreaterOrEqual);
        case kTfLiteBuiltinHardSwish:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinHardSwish);
        case kTfLiteBuiltinL2Normalization:
            return VisitL2NormalizationOperator(delegateData,
                                                tfLiteContext,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinL2Normalization);
        case kTfLiteBuiltinL2Pool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinL2Pool2d);
        case kTfLiteBuiltinLess:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLess,
                                           armnn::ComparisonOperation::Less);
        case kTfLiteBuiltinLessEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLessEqual,
                                           armnn::ComparisonOperation::LessOrEqual);
        case kTfLiteBuiltinLogistic:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLogistic);
        case kTfLiteBuiltinLocalResponseNormalization:
            return VisitLocalResponseNormalizationOperator(delegateData,
                                                           tfLiteContext,
                                                           tfLiteNode,
                                                           nodeIndex,
                                                           kTfLiteBuiltinLocalResponseNormalization);
        case kTfLiteBuiltinLog:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinLog,
                                                 armnn::UnaryOperation::Log);
        case kTfLiteBuiltinLogicalAnd:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalAnd,
                                              armnn::LogicalBinaryOperation::LogicalAnd);
        case kTfLiteBuiltinLogicalNot:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinLogicalNot,
                                                 armnn::UnaryOperation::LogicalNot);
        case kTfLiteBuiltinLogicalOr:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalOr,
                                              armnn::LogicalBinaryOperation::LogicalOr);
        case kTfLiteBuiltinLogSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinLogSoftmax);
        case kTfLiteBuiltinLstm:
            return VisitLstmOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinLstm);
        case kTfLiteBuiltinMaxPool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinMaxPool2d);
        case kTfLiteBuiltinMaximum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMaximum);
        case kTfLiteBuiltinMean:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinMean);
        case kTfLiteBuiltinMinimum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMinimum);
        case kTfLiteBuiltinMirrorPad:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinMirrorPad);
        case kTfLiteBuiltinMul:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMul);
        case kTfLiteBuiltinNeg:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinNeg,
                                                 armnn::UnaryOperation::Neg);
        case kTfLiteBuiltinNotEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinNotEqual,
                                           armnn::ComparisonOperation::NotEqual);
        case kTfLiteBuiltinPack:
            return VisitPackOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinPack);
        case kTfLiteBuiltinPad:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPad);
        case kTfLiteBuiltinPadv2:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPadv2);
        case kTfLiteBuiltinPow:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinPow);
        case kTfLiteBuiltinPrelu:
            return VisitPreluOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinPrelu);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinQuantize);
        case kTfLiteBuiltinReduceMax:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceMax);
        case kTfLiteBuiltinReduceMin:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceMin);
        case kTfLiteBuiltinReduceProd:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceProd);
        case kTfLiteBuiltinRelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu);
        case kTfLiteBuiltinReluN1To1:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinReluN1To1);
        case kTfLiteBuiltinRelu6:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu6);
        case kTfLiteBuiltinReshape:
            return VisitReshapeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinReshape);
        case kTfLiteBuiltinResizeNearestNeighbor:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeNearestNeighbor);
        case kTfLiteBuiltinResizeBilinear:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeBilinear);
        case kTfLiteBuiltinReverseV2:
            return VisitReverseV2Operator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinReverseV2);
        case kTfLiteBuiltinRsqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinRsqrt,
                                                 armnn::UnaryOperation::Rsqrt);
        case kTfLiteBuiltinShape:
            return VisitShapeOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinShape);
        case kTfLiteBuiltinSin:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinSin,
                                                 armnn::UnaryOperation::Sin);
        case kTfLiteBuiltinSlice:
            return VisitSliceOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSlice);
        case kTfLiteBuiltinSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSoftmax);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinSpaceToBatchNd);
        case kTfLiteBuiltinSpaceToDepth:
            return VisitSpaceToDepthOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinSpaceToDepth);
        case kTfLiteBuiltinSplit:
            return VisitSplitOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSplit);
        case kTfLiteBuiltinSplitV:
            return VisitSplitVOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSplitV);
        case kTfLiteBuiltinSquaredDifference:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinSquaredDifference);
        case kTfLiteBuiltinSub:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinSub);
        case kTfLiteBuiltinSqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinSqrt,
                                                 armnn::UnaryOperation::Sqrt);
        case kTfLiteBuiltinSqueeze:
            return VisitSqueezeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSqueeze);
        case kTfLiteBuiltinStridedSlice:
            return VisitStridedSliceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinStridedSlice);
        case kTfLiteBuiltinSum:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSum);
        case kTfLiteBuiltinTanh:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinTanh);
        case kTfLiteBuiltinTranspose:
            return VisitTransposeOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinTranspose);
        case kTfLiteBuiltinTransposeConv:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinTransposeConv);
        case kTfLiteBuiltinUnidirectionalSequenceLstm:
            return VisitUnidirectionalSequenceLstmOperator(delegateData,
                                                           tfLiteContext,
                                                           tfLiteNode,
                                                           nodeIndex,
                                                           kTfLiteBuiltinUnidirectionalSequenceLstm);
        case kTfLiteBuiltinUnpack:
            return VisitUnpackOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinUnpack);
        default:
            return kTfLiteError;
    }
}
} // armnnOpaqueDelegate namespace