//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>
#include <OpaqueDelegateUtils.hpp>

#include "Activation.hpp"
#include "ArgMinMax.hpp"
#include "BatchMatMul.hpp"
#include "BatchSpace.hpp"
#include "BroadcastTo.hpp"
#include "Comparison.hpp"
#include "Convolution.hpp"
#include "Control.hpp"
#include "ElementwiseBinary.hpp"
#include "ElementwiseUnary.hpp"
#include "Fill.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
#include "GatherNd.hpp"
#include "LogicalBinary.hpp"
#include "Lstm.hpp"
#include "Normalization.hpp"
#include "Pack.hpp"
#include "Pad.hpp"
#include "Pooling.hpp"
#include "Prelu.hpp"
#include "Quantization.hpp"
#include "Redefine.hpp"
#include "Reduce.hpp"
#include "Resize.hpp"
#include "ReverseV2.hpp"
#include "Round.hpp"
#include "Shape.hpp"
#include "Slice.hpp"
#include "StridedSlice.hpp"
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Tile.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/Timer.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <fstream>   // std::fstream, used when serializing the optimized graph to a dot file
#include <iomanip>   // std::setprecision and std::fixed, used in the timing logs
#include <iostream>
#include <set>       // std::set, used to collect unsupported operator codes
#include <sstream>
#include <string>    // std::to_string, used when building exception messages

namespace armnnOpaqueDelegate
{

static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
    new tflite::delegates::DelegatePluginRegistry::Register("armnn_delegate",
                                                            ArmnnDelegatePlugin::New);

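// A hedged sketch (not part of this file) of how the registration above is typically consumed, assuming
// TfLite's delegate plugin registry API; "tfLiteSettings" stands for a TFLiteSettings flatbuffer table
// provided by the caller:
//
//     auto plugin = tflite::delegates::DelegatePluginRegistry::CreateByName("armnn_delegate", tfLiteSettings);
//     // If the name was registered, 'plugin' can create and destroy the Arm NN delegate for the runtime.
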
ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
    : m_Options(std::move(options))
{
    // Configure logging for Arm NN
    if (m_Options.IsLoggingEnabled())
    {
        armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
    }
    // Create/Get the static Arm NN runtime. Note that m_Runtime is shared by all armnn_delegate
    // instances, so the RuntimeOptions cannot be altered for different armnn_delegate instances.
    m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
    std::vector<armnn::BackendId> backends;
    if (m_Runtime)
    {
        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
        for (auto& backend : m_Options.GetBackends())
        {
            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
            {
                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                                "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
            }
            else
            {
                backends.push_back(backend);
            }
        }
    }

    if (backends.empty())
    {
        // No known backend specified
        throw armnn::InvalidArgumentException("TfLiteArmnnOpaqueDelegate: No known backend specified.");
    }
    m_Options.SetBackends(backends);

    TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
}

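// For reference, a minimal sketch of selecting backends before construction. It assumes the
// armnnDelegate::DelegateOptions constructor that takes a list of BackendIds (declared in armnn_delegate.hpp);
// unknown backends are logged and dropped by the constructor above, and if none remain it throws
// armnn::InvalidArgumentException.
//
//     std::vector<armnn::BackendId> preferredBackends = { "GpuAcc", "CpuAcc", "CpuRef" };
//     armnnDelegate::DelegateOptions delegateOptions(preferredBackends);
//     armnnOpaqueDelegate::ArmnnOpaqueDelegate armnnDelegate(delegateOptions);
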
TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
    // We are required to have the void* data parameter in the function signature, but we don't actually use it.
    armnn::IgnoreUnused(data);

    TfLiteIntArray* supportedOperators =
        static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>
            (TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
    if (supportedOperators == nullptr)
    {
        return kTfLiteError;
    }

    // ArmNN Opaque Delegate Registration
    TfLiteRegistrationExternal* kernelRegistration =
        TfLiteRegistrationExternalCreate(kTfLiteBuiltinDelegate,
                                         "armnn_delegate",
                                         /*version=*/OPAQUE_DELEGATE_MAJOR_VERSION);
    if (kernelRegistration == nullptr)
    {
        return kTfLiteError;
    }

    TfLiteRegistrationExternalSetInit(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, const char* buffer, size_t length) -> void*
        {
            armnn::IgnoreUnused(length);
            const TfLiteOpaqueDelegateParams* parameters =
                reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
            if (parameters == nullptr)
            {
                TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                          "TfLiteArmnnOpaqueDelegate: Unable to get parameters.");
                return nullptr;
            }

            return static_cast<void*>(
                ArmnnSubgraph::Create(tfLiteContext,
                                      parameters,
                                      static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
                                          parameters->delegate->opaque_delegate_builder->data)));
        }
    );

    TfLiteRegistrationExternalSetFree(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, void* buffer) -> void
        {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        }
    );

    TfLiteRegistrationExternalSetPrepare(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(userData)->Prepare(tfLiteContext);
        }
    );

    TfLiteRegistrationExternalSetInvoke(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(userData)->Invoke(tfLiteContext, tfLiteNode);
        }
    );

    const TfLiteStatus status =
        TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
            tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);

    TfLiteIntArrayFree(supportedOperators);
    return status;
}

TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings)
{
    // This method always creates an Opaque Delegate with default settings, until
    // we have a DelegateOptions constructor that can parse the void* settings.
    armnn::IgnoreUnused(settings);
    auto options = TfLiteArmnnDelegateOptionsDefault();
    auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
    return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}

::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault()
{
    ::armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
    return options;
}

void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate)
{
    if (tfLiteDelegate != nullptr)
    {
        delete static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(TfLiteOpaqueDelegateGetData(tfLiteDelegate));
        TfLiteOpaqueDelegateDelete(tfLiteDelegate);
    }
}

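// A minimal usage sketch (not part of this file) showing how an application might attach and release this
// delegate through the TfLite C API; the TfLiteInterpreter* calls are assumed from the TfLite C API headers
// and error handling is omitted:
//
//     TfLiteOpaqueDelegate* armnnDelegate = armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateCreate(nullptr);
//     TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
//     TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
//     TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
//     TfLiteInterpreterAllocateTensors(interpreter);
//     TfLiteInterpreterInvoke(interpreter);
//     ...
//     TfLiteInterpreterDelete(interpreter);
//     TfLiteInterpreterOptionsDelete(options);
//     armnnOpaqueDelegate::TfLiteArmnnOpaqueDelegateDelete(armnnDelegate);
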
const std::string ArmnnOpaqueDelegate::GetVersion()
{
    return OPAQUE_DELEGATE_VERSION;
}

TfLiteIntArray* ArmnnOpaqueDelegate::IdentifyOperatorsToDelegate(TfLiteOpaqueContext* tfLiteContext)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    // Delegate data with null network
    DelegateData delegateData(m_Options.GetBackends());

    TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
    if (nodesToDelegate == nullptr)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "TfLiteArmnnOpaqueDelegate: Unable to create int array from execution plan.");
        return nullptr;
    }
    nodesToDelegate->size = 0;

    std::set<int32_t> unsupportedOperators;

    for (int i = 0; i < executionPlan->size; ++i)
    {
        const int nodeIndex = executionPlan->data[i];

        // Check whether this TfLiteOpaqueNode can be delegated to Arm NN
        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;

        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                      "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node %d.",
                                      nodeIndex);
            continue;
        }

        TfLiteStatus visitStatus;
        try
        {
            visitStatus = ArmnnSubgraph::VisitNode(
                delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
        }
        catch (std::exception& ex)
        {
            ARMNN_LOG(error) << "ArmNN failed to visit node with error: " << ex.what();
            visitStatus = kTfLiteError;
        }

        if (visitStatus != kTfLiteOk)
        {
            // Node is not supported by Arm NN
            unsupportedOperators.insert(TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration));
            continue;
        }

        nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
    }

    for (std::set<int32_t>::iterator it = unsupportedOperators.begin(); it != unsupportedOperators.end(); ++it)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "Operator %s [%d] is not supported by armnn_opaque_delegate.",
                                  tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
                                  *it);
    }

    if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: There are unsupported operators in the model. ";
        exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
        exMessage << "This should only be disabled under test conditions.";
        throw armnn::Exception(exMessage.str());
    }
    if (nodesToDelegate->size == 0)
    {
        ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate."
                        << " The model will be executed entirely by the TfLite runtime.";
    }

    std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
    return nodesToDelegate;
}

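// A minimal sketch of how a caller is expected to consume the array returned above; this mirrors DoPrepare
// earlier in this file, and ownership of the TfLiteIntArray stays with the caller:
//
//     TfLiteIntArray* supportedOperators = armnnDelegate->IdentifyOperatorsToDelegate(tfLiteContext);
//     if (supportedOperators != nullptr)
//     {
//         TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
//             tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);
//         TfLiteIntArrayFree(supportedOperators);
//     }
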
TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          const TfLiteIntArray* inputs,
                                          std::vector<armnn::BindingPointInfo>& inputBindings)
{
    const size_t numInputs = static_cast<size_t>(inputs->size);
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const int32_t tensorId = inputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if (!tensor)
        {
            return kTfLiteError;
        }

        // Do not create bindings for constant inputs
        if (TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo)
        {
            continue;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
        outputSlot.SetTensorInfo(tensorInfo);

        // Store for creating connections
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;

        inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
                                           TfLiteOpaqueContext* tfLiteContext,
                                           const TfLiteIntArray* outputs,
                                           std::vector<armnn::BindingPointInfo>& outputBindings)
{
    const size_t numOutputs = static_cast<size_t>(outputs->size);
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        const int32_t tensorId = outputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);

        if (delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] == nullptr)
        {
            return kTfLiteError;
        }

        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
        outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
                                     const TfLiteOpaqueDelegateParams* parameters,
                                     const ArmnnOpaqueDelegate* delegate)
{
    const auto startTime = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmnnSubgraph creation";

    TfLiteIntArray* executionPlan;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        return nullptr;
    }

    // Initialize DelegateData, which holds the network and the output slot information
    DelegateData delegateData(delegate->m_Options.GetBackends());

    // Build ArmNN Network
    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
    armnn::NetworkId networkId;
    delegateData.m_Network = armnn::INetwork::Create(networkOptions);

    delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(
        TfLiteOpaqueContextGetNumTensors(tfLiteContext), nullptr);

    std::vector<armnn::BindingPointInfo> inputBindings;
    std::vector<armnn::BindingPointInfo> outputBindings;

    // Add input layers
    if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
    }

    // Parse TfLite delegate nodes to ArmNN
    const auto parseStartTime = armnn::GetTimeNow();
    for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
    {
        const int nodeIndex = parameters->nodes_to_replace->data[i];

        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to get node registration: " +
                                   std::to_string(nodeIndex));
        }

        if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to parse node: " + std::to_string(nodeIndex));
        }
    }
    ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";

    // Add output layers
    if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
    }

    // Optimize ArmNN network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    try
    {
        const auto optimizeStartTime = armnn::GetTimeNow();
        optNet = armnn::Optimize(*(delegateData.m_Network.get()),
                                 delegate->m_Options.GetBackends(),
                                 delegate->m_Runtime->GetDeviceSpec(),
                                 delegate->m_Options.GetOptimizerOptions());
        ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from optimize.";
        throw armnn::Exception(exMessage.str());
    }
    if (!optNet)
    {
        // Optimize failed
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to optimize the network!");
    }

    // If set, we will serialize the optimized model into a dot file.
    const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
    if (!serializeToDotFile.empty())
    {
        ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
        fs::path filename = serializeToDotFile;
        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet->SerializeToDot(file);
    }

    try
    {
        const auto loadStartTime = armnn::GetTimeNow();

        // Load graph into runtime
        std::string errorMessage;
        armnn::Status loadingStatus;
        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
        // There's a bit of an assumption here that the delegate will only support the Malloc memory source.
        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
        {
            inputSource = armnn::MemorySource::Malloc;
        }
        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
        {
            outputSource = armnn::MemorySource::Malloc;
        }
        armnn::INetworkProperties networkProperties(false,
                                                    inputSource,
                                                    outputSource,
                                                    delegate->m_Options.GetInternalProfilingState(),
                                                    delegate->m_Options.GetInternalProfilingDetail());
        loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
                                                         std::move(optNet),
                                                         errorMessage,
                                                         networkProperties);
        if (loadingStatus != armnn::Status::Success)
        {
            // Network load failed.
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Network could not be loaded: " + errorMessage);
        }

        ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
        throw armnn::Exception(exMessage.str());
    }

    // Register debug callback function
    if (delegate->m_Options.GetDebugCallbackFunction().has_value())
    {
        delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
    }

    ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";

    // Create a new ArmnnSubgraph with the networkId and runtime
    return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
}

TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
{
    armnn::IgnoreUnused(tfLiteContext);
    return kTfLiteOk;
}

TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices. TfLiteOpaqueNodeInputs sets inputIndexArray to an int array of
    // tensor indices, one for each input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
    }
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
    {
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);

        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }
        // If the tensor is not read-only
        if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            armnn::TensorInfo inputTensorInfo = inputBinding.second;
            inputTensorInfo.SetConstant(true);
            const armnn::ConstTensor inputTensor(inputTensorInfo, TfLiteOpaqueTensorData(tensor));
            inputTensors.emplace_back(inputIndexArray[inputIdx], inputTensor);

            ++inputIndex;
        }
    }

    // Get the array of output indices. TfLiteOpaqueNodeOutputs sets outputIndexArray to an int array of
    // tensor indices, one for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
    }
    // Assign the tensors from the outputIndexArray to the Arm NN BindingPointInfo
    armnn::OutputTensors outputTensors;
    for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
    {
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }

        const armnn::Tensor outputTensor(outputBinding.second, TfLiteOpaqueTensorData(tensor));
        outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
    }

    // Run graph
    try
    {
        auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
        // The delegate holds its own Arm NN runtime, so this is our last chance to print internal profiling data.
        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }
        return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
    }
    catch (armnn::InvalidArgumentException& ex)
    {
        ARMNN_LOG(error) << "ArmNN failed to EnqueueWorkload with error: " << ex.what();
        // This should really be kTfLiteDelegateError, but the Delegate Test Suite expects kTfLiteError,
        // so we return that instead.
        return kTfLiteError;
    }
}

TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteRegistrationExternal* tfLiteRegistration,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex)
{
    switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
    {
        case kTfLiteBuiltinAbs:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinAbs, armnn::UnaryOperation::Abs);
        case kTfLiteBuiltinAdd:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinAdd);
        case kTfLiteBuiltinArgMax:
            return VisitArgMinMaxOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinArgMax);
        case kTfLiteBuiltinArgMin:
            return VisitArgMinMaxOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinArgMin);
        case kTfLiteBuiltinAveragePool2d:
            return VisitPooling2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                          kTfLiteBuiltinAveragePool2d);
        case kTfLiteBuiltinBatchMatmul:
            return VisitBatchMatMulOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                            kTfLiteBuiltinBatchMatmul);
        case kTfLiteBuiltinBroadcastTo:
            return VisitBroadcastToOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                            kTfLiteBuiltinBroadcastTo);
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                               kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinCast:
            return VisitCastOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinCast);
        case kTfLiteBuiltinCeil:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinCeil, armnn::UnaryOperation::Ceil);
        case kTfLiteBuiltinConcatenation:
            return VisitControlOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                        kTfLiteBuiltinConcatenation);
        case kTfLiteBuiltinConv2d:
            return VisitConvolutionOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinConv2d);
        case kTfLiteBuiltinConv3d:
            return VisitConvolutionOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinConv3d);
        case kTfLiteBuiltinCustom:
        {
            // Custom operators are defined by the name rather than the builtin code.
            // Parse the custom_name param in the registration to point to the correct visitor function.
            std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
            if (customOperatorName == "AveragePool3D")
            {
                return VisitPooling3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, customOperatorName);
            }
            else if (customOperatorName == "MaxPool3D")
            {
                return VisitPooling3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, customOperatorName);
            }
            // Invalid or unsupported custom operator
            return kTfLiteError;
        }
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitConvolutionOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                            kTfLiteBuiltinDepthwiseConv2d);
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinDiv:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinDiv);
        case kTfLiteBuiltinEqual:
            return VisitComparisonOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinEqual, armnn::ComparisonOperation::Equal);
        case kTfLiteBuiltinDepthToSpace:
            return VisitDepthToSpaceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                             kTfLiteBuiltinDepthToSpace);
        case kTfLiteBuiltinElu:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinElu);
        case kTfLiteBuiltinExp:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinExp, armnn::UnaryOperation::Exp);
        case kTfLiteBuiltinExpandDims:
            return VisitExpandDimsOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinExpandDims);
        case kTfLiteBuiltinFill:
            return VisitFillOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinFill);
        case kTfLiteBuiltinFloor:
            return VisitFloorOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinFloor);
        case kTfLiteBuiltinFloorDiv:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinFloorDiv);
        case kTfLiteBuiltinFullyConnected:
            return VisitFullyConnectedOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                               kTfLiteBuiltinFullyConnected);
        case kTfLiteBuiltinGather:
            return VisitGatherOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinGather);
        case kTfLiteBuiltinGatherNd:
            return VisitGatherNdOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinGatherNd);
        case kTfLiteBuiltinGelu:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinGelu);
        case kTfLiteBuiltinGreater:
            return VisitComparisonOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinGreater, armnn::ComparisonOperation::Greater);
        case kTfLiteBuiltinGreaterEqual:
            return VisitComparisonOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinGreaterEqual, armnn::ComparisonOperation::GreaterOrEqual);
        case kTfLiteBuiltinHardSwish:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinHardSwish);
        case kTfLiteBuiltinL2Normalization:
            return VisitL2NormalizationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                kTfLiteBuiltinL2Normalization);
        case kTfLiteBuiltinL2Pool2d:
            return VisitPooling2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinL2Pool2d);
        case kTfLiteBuiltinLeakyRelu:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinLeakyRelu);
        case kTfLiteBuiltinLess:
            return VisitComparisonOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinLess, armnn::ComparisonOperation::Less);
        case kTfLiteBuiltinLessEqual:
            return VisitComparisonOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinLessEqual, armnn::ComparisonOperation::LessOrEqual);
        case kTfLiteBuiltinLogistic:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinLogistic);
        case kTfLiteBuiltinLocalResponseNormalization:
            return VisitLocalResponseNormalizationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                           kTfLiteBuiltinLocalResponseNormalization);
        case kTfLiteBuiltinLog:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinLog, armnn::UnaryOperation::Log);
        case kTfLiteBuiltinLogicalAnd:
            return VisitLogicalBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                              kTfLiteBuiltinLogicalAnd, armnn::LogicalBinaryOperation::LogicalAnd);
        case kTfLiteBuiltinLogicalNot:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinLogicalNot, armnn::UnaryOperation::LogicalNot);
        case kTfLiteBuiltinLogicalOr:
            return VisitLogicalBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                              kTfLiteBuiltinLogicalOr, armnn::LogicalBinaryOperation::LogicalOr);
        case kTfLiteBuiltinLogSoftmax:
            return VisitSoftmaxOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLogSoftmax);
        case kTfLiteBuiltinLstm:
            return VisitLstmOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLstm);
        case kTfLiteBuiltinMaxPool2d:
            return VisitPooling2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                          kTfLiteBuiltinMaxPool2d);
        case kTfLiteBuiltinMaximum:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinMaximum);
        case kTfLiteBuiltinMean:
            return VisitControlOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMean);
        case kTfLiteBuiltinMinimum:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinMinimum);
        case kTfLiteBuiltinMirrorPad:
            return VisitPadOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMirrorPad);
        case kTfLiteBuiltinMul:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinMul);
        case kTfLiteBuiltinNeg:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinNeg, armnn::UnaryOperation::Neg);
        case kTfLiteBuiltinNotEqual:
            return VisitComparisonOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinNotEqual, armnn::ComparisonOperation::NotEqual);
        case kTfLiteBuiltinPack:
            return VisitPackOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPack);
        case kTfLiteBuiltinPad:
            return VisitPadOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPad);
        case kTfLiteBuiltinPadv2:
            return VisitPadOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPadv2);
        case kTfLiteBuiltinPow:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinPow);
        case kTfLiteBuiltinPrelu:
            return VisitPreluOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPrelu);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinQuantize);
        case kTfLiteBuiltinReduceMax:
            return VisitReduceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReduceMax);
        case kTfLiteBuiltinReduceMin:
            return VisitReduceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReduceMin);
        case kTfLiteBuiltinReduceProd:
            return VisitReduceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReduceProd);
        case kTfLiteBuiltinRelu:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinRelu);
        case kTfLiteBuiltinReluN1To1:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                           kTfLiteBuiltinReluN1To1);
        case kTfLiteBuiltinRelu6:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinRelu6);
        case kTfLiteBuiltinReshape:
            return VisitReshapeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReshape);
        case kTfLiteBuiltinResizeNearestNeighbor:
            return VisitResizeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                       kTfLiteBuiltinResizeNearestNeighbor);
        case kTfLiteBuiltinResizeBilinear:
            return VisitResizeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                       kTfLiteBuiltinResizeBilinear);
        case kTfLiteBuiltinReverseV2:
            return VisitReverseV2Operator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                          kTfLiteBuiltinReverseV2);
        case kTfLiteBuiltinRsqrt:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinRsqrt, armnn::UnaryOperation::Rsqrt);
        case kTfLiteBuiltinShape:
            return VisitShapeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinShape);
        case kTfLiteBuiltinSin:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinSin, armnn::UnaryOperation::Sin);
        case kTfLiteBuiltinSlice:
            return VisitSliceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSlice);
        case kTfLiteBuiltinSoftmax:
            return VisitSoftmaxOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSoftmax);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                               kTfLiteBuiltinSpaceToBatchNd);
        case kTfLiteBuiltinSpaceToDepth:
            return VisitSpaceToDepthOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                             kTfLiteBuiltinSpaceToDepth);
        case kTfLiteBuiltinSplit:
            return VisitSplitOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSplit);
        case kTfLiteBuiltinSplitV:
            return VisitSplitVOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSplitV);
        case kTfLiteBuiltinSquaredDifference:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinSquaredDifference);
        case kTfLiteBuiltinSub:
            return VisitElementwiseBinaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                  kTfLiteBuiltinSub);
        case kTfLiteBuiltinSqrt:
            return VisitElementwiseUnaryOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                 kTfLiteBuiltinSqrt, armnn::UnaryOperation::Sqrt);
        case kTfLiteBuiltinSqueeze:
            return VisitSqueezeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSqueeze);
        case kTfLiteBuiltinStridedSlice:
            return VisitStridedSliceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                             kTfLiteBuiltinStridedSlice);
        case kTfLiteBuiltinSum:
            return VisitReduceOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSum);
        case kTfLiteBuiltinTanh:
            return VisitActivationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinTanh);
        case kTfLiteBuiltinTile:
            return VisitTileOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinTile);
        case kTfLiteBuiltinTranspose:
            return VisitTransposeOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                          kTfLiteBuiltinTranspose);
        case kTfLiteBuiltinTransposeConv:
            return VisitConvolutionOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                            kTfLiteBuiltinTransposeConv);
        case kTfLiteBuiltinUnidirectionalSequenceLstm:
            return VisitUnidirectionalSequenceLstmOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                                                           kTfLiteBuiltinUnidirectionalSequenceLstm);
        case kTfLiteBuiltinUnpack:
            return VisitUnpackOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinUnpack);
        default:
            return kTfLiteError;
    }
}
} // armnnOpaqueDelegate namespace