//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>
#include <OpaqueDelegateUtils.hpp>

#include "Activation.hpp"
#include "ArgMinMax.hpp"
#include "BatchMatMul.hpp"
#include "BatchSpace.hpp"
#include "BroadcastTo.hpp"
#include "Comparison.hpp"
#include "Convolution.hpp"
#include "Control.hpp"
#include "ElementwiseBinary.hpp"
#include "ElementwiseUnary.hpp"
#include "Fill.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
#include "GatherNd.hpp"
#include "LogicalBinary.hpp"
#include "Lstm.hpp"
#include "Normalization.hpp"
#include "Pack.hpp"
#include "Pad.hpp"
#include "Pooling.hpp"
#include "Prelu.hpp"
#include "Quantization.hpp"
#include "Redefine.hpp"
#include "Reduce.hpp"
#include "Resize.hpp"
#include "ReverseV2.hpp"
#include "Round.hpp"
#include "Shape.hpp"
#include "Slice.hpp"
#include "StridedSlice.hpp"
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Tile.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/Timer.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <regex>
#include <set>
#include <sstream>
#include <string>

namespace armnnOpaqueDelegate
{

static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
    new tflite::delegates::DelegatePluginRegistry::Register("armnn_delegate", ArmnnDelegatePlugin::New);

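// Converts the ArmNNSettings table of a TFLiteSettings flatbuffer into armnnDelegate::DelegateOptions.
// The optional additional_parameters string is treated as comma-separated key=value pairs with optional
// whitespace around '=' and ',', e.g. "number-of-threads=4, reduce-fp32-to-fp16=true" (illustrative
// values only; see armnnDelegate::DelegateOptions for the option names that are actually recognised).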
armnnDelegate::DelegateOptions ParseArmNNSettings(const tflite::TFLiteSettings* tfLiteSettings)
{
    const tflite::ArmNNSettings* settings = tfLiteSettings->armnn_settings();
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(settings,
                                        "The passed TFLiteSettings did not contain a valid ArmNNSettings");

    // Extract settings fields
    bool fastmath = settings->fastmath();
    std::string backends_str = (settings->backends()) ? settings->backends()->str() : "";
    const ::flatbuffers::String* additional_parameters = settings->additional_parameters();

    // Build additional parameters string
    std::string additional_parameters_str;
    if (additional_parameters)
    {
        additional_parameters_str = additional_parameters->str();

        // Apply a regex to remove spaces around the = and , signs
        std::regex regex_equals_str("[ ]*=[ ]*");
        std::regex regex_comma_str("[ ]*,[ ]*");
        additional_parameters_str = std::regex_replace(additional_parameters_str, regex_equals_str, "=");
        additional_parameters_str = std::regex_replace(additional_parameters_str, regex_comma_str, ",");
    }

    // Build a list of option name/value pairs
    std::vector<std::pair<std::string, std::string>> options;
    options.emplace_back("backends", backends_str);
    options.emplace_back("enable-fast-math", fastmath ? "true" : "false");

    std::stringstream additional_parameters_ss(additional_parameters_str);
    while (additional_parameters_ss.good())
    {
        std::string option_str;
        std::getline(additional_parameters_ss, option_str, ',');
        size_t n = option_str.find('=');
        if (n != std::string::npos)
        {
            std::string name = option_str.substr(0, n);
            std::string value = option_str.substr(n + 1, std::string::npos);
            options.emplace_back(name, value);
        }
    }

    // Build the key and value arrays to pass into the DelegateOptions constructor.
    // Array-form unique_ptrs are used so that delete[] (not delete) is invoked on destruction.
    size_t num_options = options.size();
    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);

    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i] = options[i].first.c_str();
        options_values[i] = options[i].second.c_str();
    }

    // Finally call the constructor
    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);

    return delegateOptions;
}

ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
    : m_Options(std::move(options))
{
    // Configures logging for ARMNN
    if (m_Options.IsLoggingEnabled())
    {
        armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
    }
    // Create/Get the static ArmNN Runtime. Note that m_Runtime will be shared by all armnn_delegate
    // instances so the RuntimeOptions cannot be altered for different armnn_delegate instances.
    m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
    std::vector<armnn::BackendId> backends;
    if (m_Runtime)
    {
        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
        for (auto& backend : m_Options.GetBackends())
        {
            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
            {
                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                                "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
            }
            else
            {
                backends.push_back(backend);
            }
        }
    }

    if (backends.empty())
    {
        // No known backend specified
        throw armnn::InvalidArgumentException("TfLiteArmnnOpaqueDelegate: No known backend specified.");
    }
    m_Options.SetBackends(backends);

    TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
}

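// DoPrepare is installed as the delegate's Prepare callback through the opaque delegate builder.
// It asks the ArmnnOpaqueDelegate instance which nodes it can handle and then asks the TfLite runtime
// to replace those node subsets with the Arm NN delegate kernel registered below.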
TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
    // We are required to have the void* data parameter in the function signature, but we don't actually use it.
    armnn::IgnoreUnused(data);

    TfLiteIntArray* supportedOperators =
        static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
            TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
    if (supportedOperators == nullptr)
    {
        return kTfLiteError;
    }

    // ArmNN Opaque Delegate Registration
    TfLiteRegistrationExternal* kernelRegistration =
        TfLiteRegistrationExternalCreate(kTfLiteBuiltinDelegate,
                                         "armnn_delegate",
                                         /*version=*/OPAQUE_DELEGATE_MAJOR_VERSION);
    if (kernelRegistration == nullptr)
    {
        return kTfLiteError;
    }

    TfLiteRegistrationExternalSetInit(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, const char* buffer, size_t length) -> void*
        {
            armnn::IgnoreUnused(length);
            const TfLiteOpaqueDelegateParams* parameters =
                reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
            if (parameters == nullptr)
            {
                TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                          "TfLiteArmnnOpaqueDelegate: Unable to get parameters.");
                return nullptr;
            }

            return static_cast<void*>(
                ArmnnSubgraph::Create(tfLiteContext,
                                      parameters,
                                      static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
                                          parameters->delegate->opaque_delegate_builder->data)));
        }
    );

    TfLiteRegistrationExternalSetFree(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, void* buffer) -> void
        {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        }
    );

    TfLiteRegistrationExternalSetPrepare(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(userData)->Prepare(tfLiteContext);
        }
    );

    TfLiteRegistrationExternalSetInvoke(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(userData)->Invoke(tfLiteContext, tfLiteNode);
        }
    );

    const TfLiteStatus status =
        TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
            tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);

    TfLiteIntArrayFree(supportedOperators);
    return status;
}

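// Creates the opaque delegate handle from the given DelegateOptions. Typical usage (sketch only):
// build the options (directly or via ParseArmNNSettings), call TfLiteArmnnOpaqueDelegateCreate,
// register the returned handle with the TfLite runtime, and call TfLiteArmnnOpaqueDelegateDelete
// once the interpreter using it has been destroyed.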
TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options)
{
    auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
    return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}

::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault()
{
    ::armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
    return options;
}

void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate)
{
    if (tfLiteDelegate != nullptr)
    {
        delete static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(TfLiteOpaqueDelegateGetData(tfLiteDelegate));
        TfLiteOpaqueDelegateDelete(tfLiteDelegate);
    }
}

const std::string ArmnnOpaqueDelegate::GetVersion()
{
    return OPAQUE_DELEGATE_VERSION;
}

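// Walks the execution plan and collects the indices of the nodes that Arm NN can delegate, returned
// as a sorted TfLiteIntArray. The array is heap-allocated; the caller (DoPrepare) releases it with
// TfLiteIntArrayFree.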
TfLiteIntArray* ArmnnOpaqueDelegate::IdentifyOperatorsToDelegate(TfLiteOpaqueContext* tfLiteContext)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    // Delegate data with null network
    DelegateData delegateData(m_Options.GetBackends());

    TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
    if (nodesToDelegate == nullptr)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "TfLiteArmnnOpaqueDelegate: Unable to create int array from execution plan.");
        return nullptr;
    }
    nodesToDelegate->size = 0;

    std::set<int32_t> unsupportedOperators;

    for (int i = 0; i < executionPlan->size; ++i)
    {
        const int nodeIndex = executionPlan->data[i];

        // Check whether this TfLiteOpaqueNode can be delegated to Arm NN
        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;

        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                      "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node %d.",
                                      nodeIndex);
            continue;
        }

        TfLiteStatus visitStatus;
        try
        {
            visitStatus = ArmnnSubgraph::VisitNode(
                delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
        }
        catch (std::exception& ex)
        {
            ARMNN_LOG(error) << "ArmNN failed to visit node with error: " << ex.what();
            visitStatus = kTfLiteError;
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                      "Exception text: %s",
                                      ex.what());
        }

        if (visitStatus != kTfLiteOk)
        {
            // Node is not supported by Arm NN
            unsupportedOperators.insert(TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration));
            continue;
        }

        nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
    }

    for (int32_t builtinCode : unsupportedOperators)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "Operator %s [%d] is not supported by armnn_opaque_delegate.",
                                  tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(builtinCode)),
                                  builtinCode);
    }

    if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: There are unsupported operators in the model. ";
        exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
        exMessage << "This should only be disabled under test conditions.";
        throw armnn::Exception(exMessage.str());
    }
    if (nodesToDelegate->size == 0)
    {
        ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate."
                        << " The model will be executed entirely by the TfLite runtime.";
    }

    std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
    return nodesToDelegate;
}

TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          const TfLiteIntArray* inputs,
                                          std::vector<armnn::BindingPointInfo>& inputBindings)
{
    const size_t numInputs = static_cast<size_t>(inputs->size);
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const int32_t tensorId = inputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if (!tensor)
        {
            return kTfLiteError;
        }

        // Do not create bindings for constant inputs
        if (TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo)
        {
            continue;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
        outputSlot.SetTensorInfo(tensorInfo);

        // Store for creating connections
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;

        inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
                                           TfLiteOpaqueContext* tfLiteContext,
                                           const TfLiteIntArray* outputs,
                                           std::vector<armnn::BindingPointInfo>& outputBindings)
{
    const size_t numOutputs = static_cast<size_t>(outputs->size);
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        const int32_t tensorId = outputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);

        if (delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] == nullptr)
        {
            return kTfLiteError;
        }

        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
        outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

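// Builds an Arm NN network from the TfLite nodes handed over by the runtime, optimizes it and loads
// it into the shared Arm NN runtime. Called from the delegate kernel's Init callback; throws
// armnn::Exception if the subgraph cannot be converted, optimized or loaded.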
ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
                                     const TfLiteOpaqueDelegateParams* parameters,
                                     const ArmnnOpaqueDelegate* delegate)
{
    const auto startTime = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmnnSubgraph creation";

    TfLiteIntArray* executionPlan;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        return nullptr;
    }

    // Initialize DelegateData, which holds the network and output slot information
    DelegateData delegateData(delegate->m_Options.GetBackends());

    // Build ArmNN Network
    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
    armnn::NetworkId networkId;
    delegateData.m_Network = armnn::INetwork::Create(networkOptions);

    delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(
        TfLiteOpaqueContextGetNumTensors(tfLiteContext), nullptr);

    std::vector<armnn::BindingPointInfo> inputBindings;
    std::vector<armnn::BindingPointInfo> outputBindings;

    // Add input layer
    if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
    }

    // Parse TfLite delegate nodes to ArmNN
    const auto parseStartTime = armnn::GetTimeNow();
    for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
    {
        const int nodeIndex = parameters->nodes_to_replace->data[i];

        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to get node registration: " +
                                   std::to_string(nodeIndex));
        }

        if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to parse node: " + std::to_string(nodeIndex));
        }
    }
    ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";

    // Add output layer
    if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
    }

    // Optimize ArmNN network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    try
    {
        const auto optimizeStartTime = armnn::GetTimeNow();
        optNet = armnn::Optimize(*(delegateData.m_Network.get()),
                                 delegate->m_Options.GetBackends(),
                                 delegate->m_Runtime->GetDeviceSpec(),
                                 delegate->m_Options.GetOptimizerOptions());
        ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from optimize.";
        throw armnn::Exception(exMessage.str());
    }
    if (!optNet)
    {
        // Optimize failed
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to optimize the network!");
    }

    // If set, serialize the optimized model into a dot file.
    const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
    if (!serializeToDotFile.empty())
    {
        ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
        fs::path filename = serializeToDotFile;
        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet->SerializeToDot(file);
    }

    try
    {
        const auto loadStartTime = armnn::GetTimeNow();

        // Load graph into runtime
        std::string errorMessage;
        armnn::Status loadingStatus;
        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
        // There's a bit of an assumption here that the delegate will only support the Malloc memory source.
        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
        {
            inputSource = armnn::MemorySource::Malloc;
        }
        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
        {
            outputSource = armnn::MemorySource::Malloc;
        }
        armnn::INetworkProperties networkProperties(false,
                                                    inputSource,
                                                    outputSource,
                                                    delegate->m_Options.GetInternalProfilingState(),
                                                    delegate->m_Options.GetInternalProfilingDetail());
        loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
                                                         std::move(optNet),
                                                         errorMessage,
                                                         networkProperties);
        if (loadingStatus != armnn::Status::Success)
        {
            // Network load failed.
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Network could not be loaded: " + errorMessage);
        }

        ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
        throw armnn::Exception(exMessage.str());
    }

    // Register debug callback function
    if (delegate->m_Options.GetDebugCallbackFunction().has_value())
    {
        delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
    }

    ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";

    // Create a new SubGraph with networkId and runtime
    return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
}

TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
{
    armnn::IgnoreUnused(tfLiteContext);
    return kTfLiteOk;
}

TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices. TfLiteOpaqueNodeInputs sets inputIndexArray to an int array of
    // tensor indices, one for each input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
    }
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
    {
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);

        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }
        // Skip constant (read-only) tensors; only runtime inputs are bound.
        if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            armnn::TensorInfo inputTensorInfo = inputBinding.second;
            inputTensorInfo.SetConstant(true);
            const armnn::ConstTensor inputTensor(inputTensorInfo, TfLiteOpaqueTensorData(tensor));
            inputTensors.emplace_back(inputIndexArray[inputIdx], inputTensor);

            ++inputIndex;
        }
    }

    // Get the array of output indices. TfLiteOpaqueNodeOutputs sets outputIndexArray to an int array of
    // tensor indices, one for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
    }
    // Assign the tensors from the outputIndexArray to the armnn BindingPointInfo
    armnn::OutputTensors outputTensors;
    for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
    {
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }

        const armnn::Tensor outputTensor(outputBinding.second, TfLiteOpaqueTensorData(tensor));
        outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
    }

    // Run graph
    try
    {
        auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
        // The delegate holds its own Arm NN runtime, so this is our last chance to print internal profiling data.
        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }
        return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
    }
    catch (armnn::InvalidArgumentException& ex)
    {
        ARMNN_LOG(error) << "ArmNN failed to EnqueueWorkload with error: " << ex.what();
        // This should really be kTfLiteDelegateError, but the Delegate Test Suite expects kTfLiteError,
        // so we return that instead.
        return kTfLiteError;
    }
}

TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteRegistrationExternal* tfLiteRegistration,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex)
{
    switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
    {
        case kTfLiteBuiltinAbs:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinAbs, armnn::UnaryOperation::Abs);
        case kTfLiteBuiltinAdd:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinAdd);
        case kTfLiteBuiltinArgMax:
            return VisitArgMinMaxOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinArgMax);
        case kTfLiteBuiltinArgMin:
            return VisitArgMinMaxOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinArgMin);
        case kTfLiteBuiltinAveragePool2d:
            return VisitPooling2dOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinAveragePool2d);
        case kTfLiteBuiltinBatchMatmul:
            return VisitBatchMatMulOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinBatchMatmul);
        case kTfLiteBuiltinBroadcastTo:
            return VisitBroadcastToOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinBroadcastTo);
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinCast:
            return VisitCastOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinCast);
        case kTfLiteBuiltinCeil:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinCeil, armnn::UnaryOperation::Ceil);
        case kTfLiteBuiltinConcatenation:
            return VisitControlOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinConcatenation);
        case kTfLiteBuiltinConv2d:
            return VisitConvolutionOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinConv2d);
        case kTfLiteBuiltinConv3d:
            return VisitConvolutionOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinConv3d);
        case kTfLiteBuiltinCustom:
        {
            // Custom operators are defined by name rather than by builtin code.
            // Use the custom_name in the registration to dispatch to the correct visitor function.
            std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
            if (customOperatorName == "AveragePool3D" || customOperatorName == "MaxPool3D")
            {
                return VisitPooling3dOperator(
                    delegateData, tfLiteContext, tfLiteNode, nodeIndex, customOperatorName);
            }
            // Invalid or unsupported custom operator
            return kTfLiteError;
        }
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitConvolutionOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinDepthwiseConv2d);
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinDiv:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinDiv);
        case kTfLiteBuiltinEqual:
            return VisitComparisonOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinEqual, armnn::ComparisonOperation::Equal);
        case kTfLiteBuiltinDepthToSpace:
            return VisitDepthToSpaceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinDepthToSpace);
        case kTfLiteBuiltinElu:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinElu);
        case kTfLiteBuiltinExp:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinExp, armnn::UnaryOperation::Exp);
        case kTfLiteBuiltinExpandDims:
            return VisitExpandDimsOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinExpandDims);
        case kTfLiteBuiltinFill:
            return VisitFillOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinFill);
        case kTfLiteBuiltinFloor:
            return VisitFloorOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinFloor);
        case kTfLiteBuiltinFloorDiv:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinFloorDiv);
        case kTfLiteBuiltinFullyConnected:
            return VisitFullyConnectedOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinFullyConnected);
        case kTfLiteBuiltinGather:
            return VisitGatherOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinGather);
        case kTfLiteBuiltinGatherNd:
            return VisitGatherNdOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinGatherNd);
        case kTfLiteBuiltinGelu:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinGelu);
        case kTfLiteBuiltinGreater:
            return VisitComparisonOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinGreater, armnn::ComparisonOperation::Greater);
        case kTfLiteBuiltinGreaterEqual:
            return VisitComparisonOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinGreaterEqual, armnn::ComparisonOperation::GreaterOrEqual);
        case kTfLiteBuiltinHardSwish:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinHardSwish);
        case kTfLiteBuiltinL2Normalization:
            return VisitL2NormalizationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinL2Normalization);
        case kTfLiteBuiltinL2Pool2d:
            return VisitPooling2dOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinL2Pool2d);
        case kTfLiteBuiltinLeakyRelu:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLeakyRelu);
        case kTfLiteBuiltinLess:
            return VisitComparisonOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinLess, armnn::ComparisonOperation::Less);
        case kTfLiteBuiltinLessEqual:
            return VisitComparisonOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinLessEqual, armnn::ComparisonOperation::LessOrEqual);
        case kTfLiteBuiltinLogistic:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLogistic);
        case kTfLiteBuiltinLocalResponseNormalization:
            return VisitLocalResponseNormalizationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLocalResponseNormalization);
        case kTfLiteBuiltinLog:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinLog, armnn::UnaryOperation::Log);
        case kTfLiteBuiltinLogicalAnd:
            return VisitLogicalBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinLogicalAnd, armnn::LogicalBinaryOperation::LogicalAnd);
        case kTfLiteBuiltinLogicalNot:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinLogicalNot, armnn::UnaryOperation::LogicalNot);
        case kTfLiteBuiltinLogicalOr:
            return VisitLogicalBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinLogicalOr, armnn::LogicalBinaryOperation::LogicalOr);
        case kTfLiteBuiltinLogSoftmax:
            return VisitSoftmaxOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLogSoftmax);
        case kTfLiteBuiltinLstm:
            return VisitLstmOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinLstm);
        case kTfLiteBuiltinMaxPool2d:
            return VisitPooling2dOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMaxPool2d);
        case kTfLiteBuiltinMaximum:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMaximum);
        case kTfLiteBuiltinMean:
            return VisitControlOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMean);
        case kTfLiteBuiltinMinimum:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMinimum);
        case kTfLiteBuiltinMirrorPad:
            return VisitPadOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMirrorPad);
        case kTfLiteBuiltinMul:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinMul);
        case kTfLiteBuiltinNeg:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinNeg, armnn::UnaryOperation::Neg);
        case kTfLiteBuiltinNotEqual:
            return VisitComparisonOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinNotEqual, armnn::ComparisonOperation::NotEqual);
        case kTfLiteBuiltinPack:
            return VisitPackOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPack);
        case kTfLiteBuiltinPad:
            return VisitPadOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPad);
        case kTfLiteBuiltinPadv2:
            return VisitPadOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPadv2);
        case kTfLiteBuiltinPow:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPow);
        case kTfLiteBuiltinPrelu:
            return VisitPreluOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPrelu);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinQuantize);
        case kTfLiteBuiltinReduceMax:
            return VisitReduceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReduceMax);
        case kTfLiteBuiltinReduceMin:
            return VisitReduceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReduceMin);
        case kTfLiteBuiltinReduceProd:
            return VisitReduceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReduceProd);
        case kTfLiteBuiltinRelu:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinRelu);
        case kTfLiteBuiltinReluN1To1:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReluN1To1);
        case kTfLiteBuiltinRelu6:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinRelu6);
        case kTfLiteBuiltinReshape:
            return VisitReshapeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReshape);
        case kTfLiteBuiltinResizeNearestNeighbor:
            return VisitResizeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinResizeNearestNeighbor);
        case kTfLiteBuiltinResizeBilinear:
            return VisitResizeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinResizeBilinear);
        case kTfLiteBuiltinReverseV2:
            return VisitReverseV2Operator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinReverseV2);
        case kTfLiteBuiltinRsqrt:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinRsqrt, armnn::UnaryOperation::Rsqrt);
        case kTfLiteBuiltinShape:
            return VisitShapeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinShape);
        case kTfLiteBuiltinSin:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinSin, armnn::UnaryOperation::Sin);
        case kTfLiteBuiltinSlice:
            return VisitSliceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSlice);
        case kTfLiteBuiltinSoftmax:
            return VisitSoftmaxOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSoftmax);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSpaceToBatchNd);
        case kTfLiteBuiltinSpaceToDepth:
            return VisitSpaceToDepthOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSpaceToDepth);
        case kTfLiteBuiltinSplit:
            return VisitSplitOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSplit);
        case kTfLiteBuiltinSplitV:
            return VisitSplitVOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSplitV);
        case kTfLiteBuiltinSquaredDifference:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSquaredDifference);
        case kTfLiteBuiltinSub:
            return VisitElementwiseBinaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSub);
        case kTfLiteBuiltinSqrt:
            return VisitElementwiseUnaryOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex,
                kTfLiteBuiltinSqrt, armnn::UnaryOperation::Sqrt);
        case kTfLiteBuiltinSqueeze:
            return VisitSqueezeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSqueeze);
        case kTfLiteBuiltinStridedSlice:
            return VisitStridedSliceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinStridedSlice);
        case kTfLiteBuiltinSum:
            return VisitReduceOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinSum);
        case kTfLiteBuiltinTanh:
            return VisitActivationOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinTanh);
        case kTfLiteBuiltinTile:
            return VisitTileOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinTile);
        case kTfLiteBuiltinTranspose:
            return VisitTransposeOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinTranspose);
        case kTfLiteBuiltinTransposeConv:
            return VisitConvolutionOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinTransposeConv);
        case kTfLiteBuiltinUnidirectionalSequenceLstm:
            return VisitUnidirectionalSequenceLstmOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinUnidirectionalSequenceLstm);
        case kTfLiteBuiltinUnpack:
            return VisitUnpackOperator(
                delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinUnpack);
        default:
            return kTfLiteError;
    }
}
} // armnnOpaqueDelegate namespace