//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn_delegate.hpp>
#include <OpaqueDelegateUtils.hpp>

#include "Activation.hpp"
#include "ArgMinMax.hpp"
#include "BatchMatMul.hpp"
#include "BatchSpace.hpp"
#include "BroadcastTo.hpp"
#include "Comparison.hpp"
#include "Convolution.hpp"
#include "Control.hpp"
#include "ElementwiseBinary.hpp"
#include "ElementwiseUnary.hpp"
#include "Fill.hpp"
#include "FullyConnected.hpp"
#include "Gather.hpp"
#include "GatherNd.hpp"
#include "LogicalBinary.hpp"
#include "Lstm.hpp"
#include "Normalization.hpp"
#include "Pack.hpp"
#include "Pad.hpp"
#include "Pooling.hpp"
#include "Prelu.hpp"
#include "Quantization.hpp"
#include "Redefine.hpp"
#include "Reduce.hpp"
#include "Resize.hpp"
#include "ReverseV2.hpp"
#include "Round.hpp"
#include "Shape.hpp"
#include "Slice.hpp"
#include "StridedSlice.hpp"
#include "Softmax.hpp"
#include "SpaceDepth.hpp"
#include "Split.hpp"
#include "Tile.hpp"
#include "Transpose.hpp"
#include "UnidirectionalSequenceLstm.hpp"
#include "Unpack.hpp"

#include <armnn/utility/IgnoreUnused.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnn/utility/Timer.hpp>
#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/context_util.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <iostream>
#include <sstream>
#include <regex>

namespace armnnOpaqueDelegate
{

static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
        new tflite::delegates::DelegatePluginRegistry::Register("armnn_delegate",
                                                                ArmnnDelegatePlugin::New);

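// Converts the ArmNNSettings entry of a TFLiteSettings flatbuffer into armnnDelegate::DelegateOptions.
// The "backends" and "fastmath" fields are mapped directly; "additional_parameters" is expected to be a
// comma-separated list of key=value pairs (whitespace around '=' and ',' is tolerated), for example
// "option-a=1,option-b=true" (illustrative names only - the accepted keys are defined by DelegateOptions).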
armnnDelegate::DelegateOptions ParseArmNNSettings(const tflite::TFLiteSettings* tfLiteSettings)
{
    const tflite::ArmNNSettings* settings = tfLiteSettings->armnn_settings();
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(settings,
                                        "The passed TFLiteSettings did not contain a valid ArmNNSettings");

    // Extract settings fields
    bool fastmath = settings->fastmath();
    std::string backends_str = (settings->backends()) ? settings->backends()->str() : "";
    const ::flatbuffers::String* additional_parameters = settings->additional_parameters();

    // Build additional parameters string
    std::string additional_parameters_str;
    if (additional_parameters)
    {
        additional_parameters_str = additional_parameters->str();

        // Apply a regex to remove spaces around the = and , signs
        std::regex regex_equals_str("[ ]*=[ ]*");
        std::regex regex_comma_str("[ ]*,[ ]*");
        additional_parameters_str = std::regex_replace(additional_parameters_str, regex_equals_str, "=");
        additional_parameters_str = std::regex_replace(additional_parameters_str, regex_comma_str, ",");
    }

    // Build a std::pair list of option names and values
    std::vector<std::pair<std::string, std::string>> options;
    options.emplace_back(std::pair<std::string, std::string>("backends", backends_str));
    options.emplace_back(std::pair<std::string, std::string>("enable-fast-math", (fastmath) ? "true" : "false"));

    std::stringstream additional_parameters_ss(additional_parameters_str);
    while (additional_parameters_ss.good())
    {
        std::string option_str;
        getline(additional_parameters_ss, option_str, ',');
        size_t n = option_str.find("=");
        if (n != std::string::npos)
        {
            std::string name = option_str.substr(0, n);
            std::string value = option_str.substr(n + 1, std::string::npos);
            options.emplace_back(std::pair<std::string, std::string>(name, value));
        }
    }

    // Build the key and value lists to pass into the constructor of the DelegateOptions.
    // Use the array form of unique_ptr so the arrays are released with delete[].
    size_t num_options = options.size();
    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);

    for (size_t i = 0; i < num_options; ++i)
    {
        options_keys[i] = options[i].first.c_str();
        options_values[i] = options[i].second.c_str();
    }

    // Finally call the constructor
    armnnDelegate::DelegateOptions delegateOptions = armnnDelegate::DelegateOptions(options_keys.get(),
                                                                                    options_values.get(),
                                                                                    num_options,
                                                                                    nullptr);

    return delegateOptions;
}

ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
    : m_Options(std::move(options))
{
    // Configures logging for ARMNN
    if (m_Options.IsLoggingEnabled())
    {
        armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
    }
    // Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all armnn_delegate
    // instances so the RuntimeOptions cannot be altered for different armnn_delegate instances.
    m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
    std::vector<armnn::BackendId> backends;
    if (m_Runtime)
    {
        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
        for (auto& backend : m_Options.GetBackends())
        {
            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
            {
                TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
                                "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
            }
            else
            {
                backends.push_back(backend);
            }
        }
    }

    if (backends.empty())
    {
        // No known backend specified
        throw armnn::InvalidArgumentException("TfLiteArmnnOpaqueDelegate: No known backend specified.");
    }
    m_Options.SetBackends(backends);

    TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
}

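// Delegate preparation callback. It asks the ArmnnOpaqueDelegate instance attached to tfLiteDelegate which
// nodes it can handle, registers the ArmnnSubgraph Init/Free/Prepare/Invoke kernel callbacks, and then asks
// TfLite to replace the supported node subsets with that delegate kernel.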
TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
    // We are required to have the void* data parameter in the function signature, but we don't actually use it.
    armnn::IgnoreUnused(data);

    TfLiteIntArray* supportedOperators =
        static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>
            (TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
    if (supportedOperators == nullptr)
    {
        return kTfLiteError;
    }

    // ArmNN Opaque Delegate Registration
    TfLiteRegistrationExternal* kernelRegistration =
        TfLiteRegistrationExternalCreate(kTfLiteBuiltinDelegate,
                                         "armnn_delegate",
                                         /*version=*/OPAQUE_DELEGATE_MAJOR_VERSION);
    if (kernelRegistration == nullptr)
    {
        return kTfLiteError;
    }

    TfLiteRegistrationExternalSetInit(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, const char* buffer, size_t length) -> void*
        {
            armnn::IgnoreUnused(length);
            const TfLiteOpaqueDelegateParams* parameters =
                reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
            if (parameters == nullptr)
            {
                TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                          "TfLiteArmnnOpaqueDelegate: Unable to get parameters.");
                return nullptr;
            }

            return static_cast<void*>(
                ArmnnSubgraph::Create(tfLiteContext,
                                      parameters,
                                      static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
                                          parameters->delegate->opaque_delegate_builder->data)));
        }
    );

    TfLiteRegistrationExternalSetFree(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, void* buffer) -> void
        {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        }
    );

    TfLiteRegistrationExternalSetPrepare(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(userData)->Prepare(tfLiteContext);
        }
    );

    TfLiteRegistrationExternalSetInvoke(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(userData)->Invoke(tfLiteContext, tfLiteNode);
        }
    );

    const TfLiteStatus status =
        TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
            tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);

    TfLiteIntArrayFree(supportedOperators);
    return status;
}

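// Entry points used by the TfLite external/stable delegate mechanism. A minimal usage sketch from an
// application built against the TfLite C API might look like the following (illustrative only; exact
// interpreter setup depends on the application):
//
//     TfLiteOpaqueDelegate* delegate = TfLiteArmnnOpaqueDelegateCreate(nullptr);
//     TfLiteInterpreterOptionsAddDelegate(interpreterOptions, delegate);
//     /* ... create the interpreter, allocate tensors, invoke ... */
//     TfLiteArmnnOpaqueDelegateDelete(delegate);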
TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(const void* settings)
{
    // This method will always create an Opaque Delegate with default settings until
    // we have a DelegateOptions constructor which can parse the void* settings.
    armnn::IgnoreUnused(settings);
    auto options = TfLiteArmnnDelegateOptionsDefault();
    auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
    return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
}

::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault()
{
    ::armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
    return options;
}

void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate)
{
    if (tfLiteDelegate != nullptr)
    {
        delete static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(TfLiteOpaqueDelegateGetData(tfLiteDelegate));
        TfLiteOpaqueDelegateDelete(tfLiteDelegate);
    }
}

const std::string ArmnnOpaqueDelegate::GetVersion()
{
    return OPAQUE_DELEGATE_VERSION;
}

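// Walks the graph execution plan and returns a newly allocated, sorted TfLiteIntArray containing the indices
// of every node that VisitNode reports as supported. Unsupported operator codes are logged once each; the
// caller owns the returned array and must release it with TfLiteIntArrayFree.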
TfLiteIntArray* ArmnnOpaqueDelegate::IdentifyOperatorsToDelegate(TfLiteOpaqueContext* tfLiteContext)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    // Delegate data with null network
    DelegateData delegateData(m_Options.GetBackends());

    TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
    if (nodesToDelegate == nullptr)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "TfLiteArmnnOpaqueDelegate: Unable to create int array from execution plan.");
        return nullptr;
    }
    nodesToDelegate->size = 0;

    std::set<int32_t> unsupportedOperators;

    for (int i = 0; i < executionPlan->size; ++i)
    {
        const int nodeIndex = executionPlan->data[i];

        // Check whether this TfLiteOpaqueNode can be delegated to ArmNN
        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;

        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                      "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node %d.",
                                      nodeIndex);
            continue;
        }

        TfLiteStatus visitStatus;
        try
        {
            visitStatus = ArmnnSubgraph::VisitNode(
                delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
        }
        catch (std::exception& ex)
        {
            ARMNN_LOG(error) << "ArmNN Failed to visit node with error: " << ex.what();
            visitStatus = kTfLiteError;
        }

        if (visitStatus != kTfLiteOk)
        {
            // Node is not supported by ArmNN
            unsupportedOperators.insert(TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration));
            continue;
        }

        nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
    }

    for (std::set<int32_t>::iterator it = unsupportedOperators.begin(); it != unsupportedOperators.end(); ++it)
    {
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                  "Operator %s [%d] is not supported by armnn_opaque_delegate.",
                                  tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
                                  *it);
    }

    if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: There are unsupported operators in the model. ";
        exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
        exMessage << "This should only be disabled under test conditions.";
        throw armnn::Exception(exMessage.str());
    }
    if (nodesToDelegate->size == 0)
    {
        ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate."
                        << " The model will be executed entirely by the TfLite runtime.";
    }

    std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
    return nodesToDelegate;
}

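// Helpers used by ArmnnSubgraph::Create to bind the subgraph boundary tensors: AddInputLayer creates an ArmNN
// input layer (and binding info) for each non-constant input tensor, and AddOutputLayer connects the
// previously recorded output slots to ArmNN output layers.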
TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          const TfLiteIntArray* inputs,
                                          std::vector<armnn::BindingPointInfo>& inputBindings)
{
    const size_t numInputs = static_cast<size_t>(inputs->size);
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const int32_t tensorId = inputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if (!tensor)
        {
            return kTfLiteError;
        }

        // Do not create bindings for constant inputs
        if (TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo)
        {
            continue;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
        outputSlot.SetTensorInfo(tensorInfo);

        // Store for creating connections
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;

        inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
                                           TfLiteOpaqueContext* tfLiteContext,
                                           const TfLiteIntArray* outputs,
                                           std::vector<armnn::BindingPointInfo>& outputBindings)
{
    const size_t numOutputs = static_cast<size_t>(outputs->size);
    for (unsigned int i = 0; i < numOutputs; ++i)
    {
        const int32_t tensorId = outputs->data[i];
        const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);

        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }

        auto bindingId = static_cast<armnn::LayerBindingId>(tensorId);
        armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);

        auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);

        if (delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] == nullptr)
        {
            return kTfLiteError;
        }

        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
        outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
    }

    return kTfLiteOk;
}

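// Builds, optimizes and loads an ArmNN network for the node subset described by 'parameters', and returns a
// new ArmnnSubgraph bound to the loaded network. Throws armnn::Exception if any stage fails.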
ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
                                     const TfLiteOpaqueDelegateParams* parameters,
                                     const ArmnnOpaqueDelegate* delegate)
{
    const auto startTime = armnn::GetTimeNow();
    ARMNN_LOG(info) << "ArmnnSubgraph creation";

    TfLiteIntArray* executionPlan;
    if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        return nullptr;
    }

    // Initialize DelegateData, which holds the network and the output slot information
    DelegateData delegateData(delegate->m_Options.GetBackends());

    // Build ArmNN Network
    armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
    armnn::NetworkId networkId;
    delegateData.m_Network = armnn::INetwork::Create(networkOptions);

    delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(
        TfLiteOpaqueContextGetNumTensors(tfLiteContext), nullptr);

    std::vector<armnn::BindingPointInfo> inputBindings;
    std::vector<armnn::BindingPointInfo> outputBindings;

    // Add input layer
    if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
    }

    // Parse TfLite delegate nodes to ArmNN
    const auto parseStartTime = armnn::GetTimeNow();
    for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
    {
        const int nodeIndex = parameters->nodes_to_replace->data[i];

        TfLiteOpaqueNode* tfLiteNode = nullptr;
        TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
        if (TfLiteOpaqueContextGetNodeAndRegistration(
                tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to get node registration for node " +
                                   std::to_string(nodeIndex));
        }

        if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
        {
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to parse node " + std::to_string(nodeIndex));
        }
    }
    ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";

    // Add Output layer
    if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
    }

    // Optimize ArmNN network
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    try
    {
        const auto optimizeStartTime = armnn::GetTimeNow();
        optNet = armnn::Optimize(*(delegateData.m_Network.get()),
                                 delegate->m_Options.GetBackends(),
                                 delegate->m_Runtime->GetDeviceSpec(),
                                 delegate->m_Options.GetOptimizerOptions());
        ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from optimize.";
        throw armnn::Exception(exMessage.str());
    }
    if (!optNet)
    {
        // Optimize failed
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to optimize the network!");
    }

    // If set, we will serialize the optimized model into a dot file.
    const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
    if (!serializeToDotFile.empty())
    {
        ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
        fs::path filename = serializeToDotFile;
        std::fstream file(filename.c_str(), std::ios_base::out);
        optNet->SerializeToDot(file);
    }

    try
    {
        const auto loadStartTime = armnn::GetTimeNow();

        // Load graph into runtime
        std::string errorMessage;
        armnn::Status loadingStatus;
        armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
        armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
        // There's a bit of an assumption here that the delegate will only support Malloc memory source.
        if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
        {
            inputSource = armnn::MemorySource::Malloc;
        }
        if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
        {
            outputSource = armnn::MemorySource::Malloc;
        }
        armnn::INetworkProperties networkProperties(false,
                                                    inputSource,
                                                    outputSource,
                                                    delegate->m_Options.GetInternalProfilingState(),
                                                    delegate->m_Options.GetInternalProfilingDetail());
        loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
                                                         std::move(optNet),
                                                         errorMessage,
                                                         networkProperties);
        if (loadingStatus != armnn::Status::Success)
        {
            // Network load failed.
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Network could not be loaded: " + errorMessage);
        }

        ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
                        << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
    }
    catch (std::exception& ex)
    {
        std::stringstream exMessage;
        exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
        throw armnn::Exception(exMessage.str());
    }

    // Register debug callback function
    if (delegate->m_Options.GetDebugCallbackFunction().has_value())
    {
        delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
    }

    ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";

    // Create a new SubGraph with networkId and runtime
    return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
}

TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
{
    armnn::IgnoreUnused(tfLiteContext);
    return kTfLiteOk;
}

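// Executes the loaded ArmNN network for this subgraph: non-constant node inputs and all node outputs are
// wrapped as ArmNN tensors pointing at the TfLite tensor buffers, then EnqueueWorkload runs the graph in place.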
TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices from TfLiteOpaqueNodeInputs.
    // These indices identify the tensor bound to each input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
    }
    // Prepare inputs
    armnn::InputTensors inputTensors;
    size_t inputIndex = 0;
    for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
    {
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);

        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }
        // If tensor is not read only
        if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
        {
            const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
            armnn::TensorInfo inputTensorInfo = inputBinding.second;
            inputTensorInfo.SetConstant(true);
            const armnn::ConstTensor inputTensor(inputTensorInfo, TfLiteOpaqueTensorData(tensor));
            inputTensors.emplace_back(inputIndexArray[inputIdx], inputTensor);

            ++inputIndex;
        }
    }

    // Get the array of output indices from TfLiteOpaqueNodeOutputs.
    // These indices identify the tensor bound to each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
    }
    // Assign the tensors from the outputIndexArray to the armnn BindingPointInfo
    armnn::OutputTensors outputTensors;
    for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
    {
        const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
        TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
        if (!IsValid(tensor))
        {
            return kTfLiteError;
        }

        const armnn::Tensor outputTensor(outputBinding.second, TfLiteOpaqueTensorData(tensor));
        outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
    }

    // Run graph
    try
    {
        auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
        // The delegate holds its own Arm NN runtime, so this is our last chance to print internal profiling data.
        std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
        if (profiler && profiler->IsProfilingEnabled())
        {
            profiler->Print(std::cout);
        }
        return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
    }
    catch (armnn::InvalidArgumentException& ex)
    {
        ARMNN_LOG(error) << "ArmNN Failed to EnqueueWorkload with error: " << ex.what();
        // This should really be kTfLiteDelegateError, but the Delegate Test Suite expects kTfLiteError,
        // so we return that instead.
        return kTfLiteError;
    }
}

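// Maps a TfLite builtin (or supported custom) operator onto the corresponding Visit*Operator function, which
// validates the node and, when delegateData holds a network, adds the equivalent ArmNN layer. Returns
// kTfLiteError for operators the delegate does not support.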
TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteRegistrationExternal* tfLiteRegistration,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex)
{
    switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
    {
        case kTfLiteBuiltinAbs:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinAbs,
                                                 armnn::UnaryOperation::Abs);
        case kTfLiteBuiltinAdd:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinAdd);
        case kTfLiteBuiltinArgMax:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMax);
        case kTfLiteBuiltinArgMin:
            return VisitArgMinMaxOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinArgMin);
        case kTfLiteBuiltinAveragePool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinAveragePool2d);
        case kTfLiteBuiltinBatchMatmul:
            return VisitBatchMatMulOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinBatchMatmul);
        case kTfLiteBuiltinBroadcastTo:
            return VisitBroadcastToOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinBroadcastTo);
        case kTfLiteBuiltinBatchToSpaceNd:
            return VisitBatchToSpaceNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinBatchToSpaceNd);
        case kTfLiteBuiltinCast:
            return VisitCastOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinCast);
        case kTfLiteBuiltinCeil:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinCeil,
                                                 armnn::UnaryOperation::Ceil);
        case kTfLiteBuiltinConcatenation:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinConcatenation);
        case kTfLiteBuiltinConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv2d);
        case kTfLiteBuiltinConv3d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinConv3d);
        case kTfLiteBuiltinCustom:
        {
            // Custom operators are defined by the name rather than the builtin code.
            // Parse the custom_name param in the registration to point to the correct visitor function.
            std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
            if (customOperatorName == "AveragePool3D")
            {
                return VisitPooling3dOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              customOperatorName);
            }
            else if (customOperatorName == "MaxPool3D")
            {
                return VisitPooling3dOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              customOperatorName);
            }
            // Invalid or unsupported custom operator
            return kTfLiteError;
        }
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinDepthwiseConv2d);
        case kTfLiteBuiltinDequantize:
            return VisitDequantizeOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinDequantize);
        case kTfLiteBuiltinDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinDiv);
        case kTfLiteBuiltinEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinEqual,
                                           armnn::ComparisonOperation::Equal);
        case kTfLiteBuiltinDepthToSpace:
            return VisitDepthToSpaceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinDepthToSpace);
        case kTfLiteBuiltinElu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinElu);
        case kTfLiteBuiltinExp:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinExp,
                                                 armnn::UnaryOperation::Exp);
        case kTfLiteBuiltinExpandDims:
            return VisitExpandDimsOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinExpandDims);
        case kTfLiteBuiltinFill:
            return VisitFillOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinFill);
        case kTfLiteBuiltinFloor:
            return VisitFloorOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinFloor);
        case kTfLiteBuiltinFloorDiv:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinFloorDiv);
        case kTfLiteBuiltinFullyConnected:
            return VisitFullyConnectedOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinFullyConnected);
        case kTfLiteBuiltinGather:
            return VisitGatherOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinGather);
        case kTfLiteBuiltinGatherNd:
            return VisitGatherNdOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinGatherNd);
        case kTfLiteBuiltinGelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGelu);
        case kTfLiteBuiltinGreater:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreater,
                                           armnn::ComparisonOperation::Greater);
        case kTfLiteBuiltinGreaterEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinGreaterEqual,
                                           armnn::ComparisonOperation::GreaterOrEqual);
        case kTfLiteBuiltinHardSwish:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinHardSwish);
        case kTfLiteBuiltinL2Normalization:
            return VisitL2NormalizationOperator(delegateData,
                                                tfLiteContext,
                                                tfLiteNode,
                                                nodeIndex,
                                                kTfLiteBuiltinL2Normalization);
        case kTfLiteBuiltinL2Pool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinL2Pool2d);
        case kTfLiteBuiltinLeakyRelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLeakyRelu);
        case kTfLiteBuiltinLess:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLess,
                                           armnn::ComparisonOperation::Less);
        case kTfLiteBuiltinLessEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLessEqual,
                                           armnn::ComparisonOperation::LessOrEqual);
        case kTfLiteBuiltinLogistic:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinLogistic);
        case kTfLiteBuiltinLocalResponseNormalization:
            return VisitLocalResponseNormalizationOperator(delegateData,
                                                           tfLiteContext,
                                                           tfLiteNode,
                                                           nodeIndex,
                                                           kTfLiteBuiltinLocalResponseNormalization);
        case kTfLiteBuiltinLog:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinLog,
                                                 armnn::UnaryOperation::Log);
        case kTfLiteBuiltinLogicalAnd:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalAnd,
                                              armnn::LogicalBinaryOperation::LogicalAnd);
        case kTfLiteBuiltinLogicalNot:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinLogicalNot,
                                                 armnn::UnaryOperation::LogicalNot);
        case kTfLiteBuiltinLogicalOr:
            return VisitLogicalBinaryOperator(delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex,
                                              kTfLiteBuiltinLogicalOr,
                                              armnn::LogicalBinaryOperation::LogicalOr);
        case kTfLiteBuiltinLogSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinLogSoftmax);
        case kTfLiteBuiltinLstm:
            return VisitLstmOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinLstm);
        case kTfLiteBuiltinMaxPool2d:
            return VisitPooling2dOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinMaxPool2d);
        case kTfLiteBuiltinMaximum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMaximum);
        case kTfLiteBuiltinMean:
            return VisitControlOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinMean);
        case kTfLiteBuiltinMinimum:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMinimum);
        case kTfLiteBuiltinMirrorPad:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinMirrorPad);
        case kTfLiteBuiltinMul:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinMul);
        case kTfLiteBuiltinNeg:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinNeg,
                                                 armnn::UnaryOperation::Neg);
        case kTfLiteBuiltinNotEqual:
            return VisitComparisonOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinNotEqual,
                                           armnn::ComparisonOperation::NotEqual);
        case kTfLiteBuiltinPack:
            return VisitPackOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinPack);
        case kTfLiteBuiltinPad:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPad);
        case kTfLiteBuiltinPadv2:
            return VisitPadOperator(delegateData,
                                    tfLiteContext,
                                    tfLiteNode,
                                    nodeIndex,
                                    kTfLiteBuiltinPadv2);
        case kTfLiteBuiltinPow:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinPow);
        case kTfLiteBuiltinPrelu:
            return VisitPreluOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinPrelu);
        case kTfLiteBuiltinQuantize:
            return VisitQuantizeOperator(delegateData,
                                         tfLiteContext,
                                         tfLiteNode,
                                         nodeIndex,
                                         kTfLiteBuiltinQuantize);
        case kTfLiteBuiltinReduceMax:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceMax);
        case kTfLiteBuiltinReduceMin:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceMin);
        case kTfLiteBuiltinReduceProd:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinReduceProd);
        case kTfLiteBuiltinRelu:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu);
        case kTfLiteBuiltinReluN1To1:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinReluN1To1);
        case kTfLiteBuiltinRelu6:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinRelu6);
        case kTfLiteBuiltinReshape:
            return VisitReshapeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinReshape);
        case kTfLiteBuiltinResizeNearestNeighbor:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeNearestNeighbor);
        case kTfLiteBuiltinResizeBilinear:
            return VisitResizeOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinResizeBilinear);
        case kTfLiteBuiltinReverseV2:
            return VisitReverseV2Operator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinReverseV2);
        case kTfLiteBuiltinRsqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinRsqrt,
                                                 armnn::UnaryOperation::Rsqrt);
        case kTfLiteBuiltinShape:
            return VisitShapeOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinShape);
        case kTfLiteBuiltinSin:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinSin,
                                                 armnn::UnaryOperation::Sin);
        case kTfLiteBuiltinSlice:
            return VisitSliceOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSlice);
        case kTfLiteBuiltinSoftmax:
            return VisitSoftmaxOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSoftmax);
        case kTfLiteBuiltinSpaceToBatchNd:
            return VisitSpaceToBatchNdOperator(delegateData,
                                               tfLiteContext,
                                               tfLiteNode,
                                               nodeIndex,
                                               kTfLiteBuiltinSpaceToBatchNd);
        case kTfLiteBuiltinSpaceToDepth:
            return VisitSpaceToDepthOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinSpaceToDepth);
        case kTfLiteBuiltinSplit:
            return VisitSplitOperator(delegateData,
                                      tfLiteContext,
                                      tfLiteNode,
                                      nodeIndex,
                                      kTfLiteBuiltinSplit);
        case kTfLiteBuiltinSplitV:
            return VisitSplitVOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSplitV);
        case kTfLiteBuiltinSquaredDifference:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinSquaredDifference);
        case kTfLiteBuiltinSub:
            return VisitElementwiseBinaryOperator(delegateData,
                                                  tfLiteContext,
                                                  tfLiteNode,
                                                  nodeIndex,
                                                  kTfLiteBuiltinSub);
        case kTfLiteBuiltinSqrt:
            return VisitElementwiseUnaryOperator(delegateData,
                                                 tfLiteContext,
                                                 tfLiteNode,
                                                 nodeIndex,
                                                 kTfLiteBuiltinSqrt,
                                                 armnn::UnaryOperation::Sqrt);
        case kTfLiteBuiltinSqueeze:
            return VisitSqueezeOperator(delegateData,
                                        tfLiteContext,
                                        tfLiteNode,
                                        nodeIndex,
                                        kTfLiteBuiltinSqueeze);
        case kTfLiteBuiltinStridedSlice:
            return VisitStridedSliceOperator(delegateData,
                                             tfLiteContext,
                                             tfLiteNode,
                                             nodeIndex,
                                             kTfLiteBuiltinStridedSlice);
        case kTfLiteBuiltinSum:
            return VisitReduceOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinSum);
        case kTfLiteBuiltinTanh:
            return VisitActivationOperator(delegateData,
                                           tfLiteContext,
                                           tfLiteNode,
                                           nodeIndex,
                                           kTfLiteBuiltinTanh);
        case kTfLiteBuiltinTile:
            return VisitTileOperator(delegateData,
                                     tfLiteContext,
                                     tfLiteNode,
                                     nodeIndex,
                                     kTfLiteBuiltinTile);
        case kTfLiteBuiltinTranspose:
            return VisitTransposeOperator(delegateData,
                                          tfLiteContext,
                                          tfLiteNode,
                                          nodeIndex,
                                          kTfLiteBuiltinTranspose);
        case kTfLiteBuiltinTransposeConv:
            return VisitConvolutionOperator(delegateData,
                                            tfLiteContext,
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinTransposeConv);
        case kTfLiteBuiltinUnidirectionalSequenceLstm:
            return VisitUnidirectionalSequenceLstmOperator(delegateData,
                                                           tfLiteContext,
                                                           tfLiteNode,
                                                           nodeIndex,
                                                           kTfLiteBuiltinUnidirectionalSequenceLstm);
        case kTfLiteBuiltinUnpack:
            return VisitUnpackOperator(delegateData,
                                       tfLiteContext,
                                       tfLiteNode,
                                       nodeIndex,
                                       kTfLiteBuiltinUnpack);
        default:
            return kTfLiteError;
    }
}
} // armnnOpaqueDelegate namespace