blob: 129bc4333b65a8526faaa88eed3c5b9a7e80a5ea [file] [log] [blame]
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +00001//
2// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include <armnn_delegate.hpp>
Ryan OSheaac9607f2023-04-03 11:33:33 +01007#include <OpaqueDelegateUtils.hpp>
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +00008
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +00009#include "Activation.hpp"
10#include "ArgMinMax.hpp"
11#include "BatchMatMul.hpp"
12#include "BatchSpace.hpp"
Idriss Chaouchcbf79292023-09-08 11:18:16 +010013#include "BroadcastTo.hpp"
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000014#include "Comparison.hpp"
15#include "Convolution.hpp"
16#include "Control.hpp"
17#include "ElementwiseBinary.hpp"
18#include "ElementwiseUnary.hpp"
19#include "Fill.hpp"
20#include "FullyConnected.hpp"
21#include "Gather.hpp"
22#include "GatherNd.hpp"
23#include "LogicalBinary.hpp"
24#include "Lstm.hpp"
25#include "Normalization.hpp"
26#include "Pack.hpp"
27#include "Pad.hpp"
28#include "Pooling.hpp"
29#include "Prelu.hpp"
30#include "Quantization.hpp"
31#include "Redefine.hpp"
32#include "Reduce.hpp"
33#include "Resize.hpp"
Tracy Narine7306bbe2023-07-17 16:06:26 +010034#include "ReverseV2.hpp"
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000035#include "Round.hpp"
36#include "Shape.hpp"
37#include "Slice.hpp"
38#include "StridedSlice.hpp"
39#include "Softmax.hpp"
40#include "SpaceDepth.hpp"
41#include "Split.hpp"
Tianle Cheng92ce35c2023-07-25 16:41:00 +010042#include "Tile.hpp"
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000043#include "Transpose.hpp"
44#include "UnidirectionalSequenceLstm.hpp"
45#include "Unpack.hpp"
46
47#include <armnn/utility/IgnoreUnused.hpp>
48#include <armnnUtils/Filesystem.hpp>
49#include <armnn/utility/Timer.hpp>
50#include <flatbuffers/flatbuffers.h>
51#include <tensorflow/lite/context_util.h>
52#include <tensorflow/lite/schema/schema_generated.h>
53#include <tensorflow/lite/minimal_logging.h>
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000054
55#include <algorithm>
56#include <iostream>
57#include <sstream>
Teresa Charlin19ad8162023-10-04 11:17:03 +010058#include <regex>
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000059
60namespace armnnOpaqueDelegate
61{
62
// Register the ArmNN delegate plugin with TfLite's DelegatePluginRegistry under
// the name "armnn_delegate" so it can be instantiated through the delegate
// plugin mechanism. Registration happens as a side effect of constructing the
// Register object; the pointer is intentionally kept for the process lifetime.
static auto* g_delegate_plugin_ArmnnDelegatePlugin_ =
    new tflite::delegates::DelegatePluginRegistry::Register("armnn_delegate",
                                                            ArmnnDelegatePlugin::New);
66
Teresa Charlin19ad8162023-10-04 11:17:03 +010067armnnDelegate::DelegateOptions ParseArmNNSettings(const tflite::TFLiteSettings* tfLiteSettings)
68{
69 const tflite::ArmNNSettings* settings = tfLiteSettings->armnn_settings();
70 ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(settings,
71 "The passed TFLiteSettings did not contain a valid ArmNNSettings");
72
73 // Extract settings fields
74 bool fastmath = settings->fastmath();
75 std::string backends_str = (settings->backends()) ? settings->backends()->str() : "";
76 const ::flatbuffers::String* additional_parameters = settings->additional_parameters();
77
78 // Build additional parameters string
79 std::string additional_parameters_str;
80 if (additional_parameters)
81 {
82 additional_parameters_str = additional_parameters->str();
83
84 // Apply a regex to remove spaces around the = and , signs
85 std::regex regex_equals_str("[ ]*=[ ]*");
86 std::regex regex_comma_str("[ ]*,[ ]*");
87 additional_parameters_str = std::regex_replace(additional_parameters_str, regex_equals_str, "=");
88 additional_parameters_str = std::regex_replace(additional_parameters_str, regex_comma_str, ",");
89 }
90
91 // Build a std::pair list of option names and values
92 std::vector<std::pair<std::string, std::string>> options;
93 options.emplace_back(std::pair<std::string, std::string>("backends", backends_str));
94 options.emplace_back(std::pair<std::string, std::string>("enable-fast-math", (fastmath) ? "true" : "false"));
95
96 std::stringstream additional_parameters_ss(additional_parameters_str);
97 while (additional_parameters_ss.good())
98 {
99 std::string option_str;
100 getline( additional_parameters_ss, option_str, ',' );
101 size_t n = option_str.find("=");
102 if (n != std::string::npos)
103 {
104 std::string name = option_str.substr(0, n);
105 std::string value = option_str.substr(n + 1, std::string::npos);
106 options.emplace_back(std::pair<std::string, std::string>(name, value));
107 }
108 }
109
110 // Build the key and value lists to pass into the constructor of the DelegateOptions
111 size_t num_options = options.size();
112 std::unique_ptr<const char*> options_keys = std::unique_ptr<const char*>(new const char*[num_options + 1]);
113 std::unique_ptr<const char*> options_values = std::unique_ptr<const char*>(new const char*[num_options + 1]);
114
115 for (size_t i=0; i<num_options; ++i)
116 {
117 options_keys.get()[i] = options[i].first.c_str();
118 options_values.get()[i] = options[i].second.c_str();
119 }
120
121 // Finally call the constructor
122 armnnDelegate::DelegateOptions delegateOptions = armnnDelegate::DelegateOptions(options_keys.get(),
123 options_values.get(),
124 num_options,
125 nullptr);
126
127 return delegateOptions;
128}
129
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +0000130ArmnnOpaqueDelegate::ArmnnOpaqueDelegate(armnnDelegate::DelegateOptions options)
131 : m_Options(std::move(options))
132{
133 // Configures logging for ARMNN
134 if (m_Options.IsLoggingEnabled())
135 {
136 armnn::ConfigureLogging(true, true, m_Options.GetLoggingSeverity());
137 }
138 // Create/Get the static ArmNN Runtime. Note that the m_Runtime will be shared by all armnn_delegate
139 // instances so the RuntimeOptions cannot be altered for different armnn_delegate instances.
140 m_Runtime = GetRuntime(m_Options.GetRuntimeOptions());
141 std::vector<armnn::BackendId> backends;
142 if (m_Runtime)
143 {
144 const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
145 for (auto& backend : m_Options.GetBackends())
146 {
147 if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
148 {
149 TFLITE_LOG_PROD(tflite::TFLITE_LOG_INFO,
Teresa Charlinf69ae562023-04-27 14:42:23 +0100150 "TfLiteArmnnOpaqueDelegate: Requested unknown backend %s", backend.Get().c_str());
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +0000151 }
152 else
153 {
154 backends.push_back(backend);
155 }
156 }
157 }
158
159 if (backends.empty())
160 {
161 // No known backend specified
162 throw armnn::InvalidArgumentException("TfLiteArmnnOpaqueDelegate: No known backend specified.");
163 }
164 m_Options.SetBackends(backends);
165
166 TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO, "TfLiteArmnnOpaqueDelegate: Created TfLite ArmNN delegate.");
167}
168
// Delegate "prepare" entry point called by TfLite. Identifies the nodes ArmNN
// can handle and registers an external kernel (init/free/prepare/invoke
// callbacks backed by ArmnnSubgraph) to replace those node subsets.
// Returns kTfLiteError if node identification or registration creation fails.
TfLiteStatus DoPrepare(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueDelegate* tfLiteDelegate, void* data)
{
    // We are required to have the void* data parameter in the function signature, but we don't actually use it.
    armnn::IgnoreUnused(data);

    // Ask the ArmnnOpaqueDelegate instance (stored as the opaque delegate's data)
    // which operators in the current execution plan it can take over.
    TfLiteIntArray* supportedOperators =
        static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>
            (TfLiteOpaqueDelegateGetData(tfLiteDelegate))->IdentifyOperatorsToDelegate(tfLiteContext);
    if(supportedOperators == nullptr)
    {
        return kTfLiteError;
    }

    // ArmNN Opaque Delegate Registration
    TfLiteRegistrationExternal* kernelRegistration =
        TfLiteRegistrationExternalCreate(kTfLiteBuiltinDelegate,
                                         "armnn_delegate",
                                         /*version=*/OPAQUE_DELEGATE_MAJOR_VERSION);
    if(kernelRegistration == nullptr)
    {
        return kTfLiteError;
    }

    // Init callback: 'buffer' carries the TfLiteOpaqueDelegateParams for the
    // partition; build an ArmnnSubgraph for it and return it as the user data.
    TfLiteRegistrationExternalSetInit(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, const char* buffer, size_t length) -> void*
        {
            armnn::IgnoreUnused(length);
            const TfLiteOpaqueDelegateParams* parameters =
                reinterpret_cast<const TfLiteOpaqueDelegateParams*>(buffer);
            if(parameters == nullptr)
            {
                TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
                                          "TfLiteArmnnOpaqueDelegate: Unable to get parameters.");
                return nullptr;
            }

            return static_cast<void*>(
                ArmnnSubgraph::Create(tfLiteContext,
                                      parameters,
                                      static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(
                                              parameters->delegate->opaque_delegate_builder->data)));
        }
    );

    // Free callback: destroys the ArmnnSubgraph created by the init callback.
    TfLiteRegistrationExternalSetFree(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, void* buffer) -> void
        {
            armnn::IgnoreUnused(tfLiteContext);
            if (buffer != nullptr)
            {
                delete static_cast<ArmnnSubgraph*>(buffer);
            }
        }
    );

    // Prepare callback: forwards to ArmnnSubgraph::Prepare.
    TfLiteRegistrationExternalSetPrepare(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }
            return static_cast<ArmnnSubgraph*>(userData)->Prepare(tfLiteContext);
        }
    );

    // Invoke callback: forwards to ArmnnSubgraph::Invoke to execute the subgraph.
    TfLiteRegistrationExternalSetInvoke(
        kernelRegistration,
        [](TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode) -> TfLiteStatus
        {
            void* userData = TfLiteOpaqueNodeGetUserData(tfLiteNode);
            if (userData == nullptr)
            {
                return kTfLiteError;
            }

            return static_cast<ArmnnSubgraph*>(userData)->Invoke(tfLiteContext, tfLiteNode);
        }
    );

    // Hand the supported node subsets over to TfLite to be replaced by the
    // delegate kernel registered above.
    const TfLiteStatus status =
        TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels(
            tfLiteContext, kernelRegistration, supportedOperators, tfLiteDelegate);

    // supportedOperators was allocated by IdentifyOperatorsToDelegate; release it here.
    TfLiteIntArrayFree(supportedOperators);
    return status;
}
260
Teresa Charlin3e4b6082023-10-19 19:13:29 +0100261TfLiteOpaqueDelegate* TfLiteArmnnOpaqueDelegateCreate(armnnDelegate::DelegateOptions options)
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +0000262{
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +0000263 auto* armnnDelegate = new ::armnnOpaqueDelegate::ArmnnOpaqueDelegate(options);
264 return TfLiteOpaqueDelegateCreate(armnnDelegate->GetDelegateBuilder());
265}
266
267::armnnDelegate::DelegateOptions TfLiteArmnnDelegateOptionsDefault()
268{
269 ::armnnDelegate::DelegateOptions options(armnn::Compute::CpuRef);
270 return options;
271}
272
273void TfLiteArmnnOpaqueDelegateDelete(TfLiteOpaqueDelegate* tfLiteDelegate)
274{
275 if (tfLiteDelegate != nullptr)
276 {
277 delete static_cast<::armnnOpaqueDelegate::ArmnnOpaqueDelegate*>(TfLiteOpaqueDelegateGetData(tfLiteDelegate));
278 TfLiteOpaqueDelegateDelete(tfLiteDelegate);
279 }
280}
281
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +0000282const std::string ArmnnOpaqueDelegate::GetVersion() {
283 return OPAQUE_DELEGATE_VERSION;
284}
285
Matthew Sloyan54cf0112023-04-03 16:32:57 +0100286TfLiteIntArray* ArmnnOpaqueDelegate::IdentifyOperatorsToDelegate(TfLiteOpaqueContext* tfLiteContext)
287{
288 TfLiteIntArray* executionPlan = nullptr;
289 if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
290 {
291 TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unable to get graph execution plan.");
292 return nullptr;
293 }
294
295 // Delegate data with null network
296 DelegateData delegateData(m_Options.GetBackends());
297
298 TfLiteIntArray* nodesToDelegate = TfLiteIntArrayCreate(executionPlan->size);
299 if (nodesToDelegate == nullptr)
300 {
301 TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
302 "TfLiteArmnnOpaqueDelegate: Unable to create int array from execution plan.");
303 return nullptr;
304 }
305 nodesToDelegate->size = 0;
306
307 std::set<int32_t> unsupportedOperators;
308
309 for (int i = 0; i < executionPlan->size; ++i)
310 {
311 const int nodeIndex = executionPlan->data[i];
312
313 // If TfLiteOpaqueNodes can be delegated to ArmNN
314 TfLiteOpaqueNode* tfLiteNode = nullptr;
315 TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
316
317 if (TfLiteOpaqueContextGetNodeAndRegistration(
318 tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
319 {
320 TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
321 "TfLiteArmnnOpaqueDelegate: Unable to get node and registration for node %d.",
322 nodeIndex);
323 continue;
324 }
325
326 TfLiteStatus visitStatus;
327 try
328 {
329 visitStatus = ArmnnSubgraph::VisitNode(
330 delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex);
331 }
332 catch(std::exception& ex)
333 {
334 ARMNN_LOG(error) << "ArmNN Failed to visit node with error: " << ex.what();
335 visitStatus = kTfLiteError;
336 }
337
338 if (visitStatus != kTfLiteOk)
339 {
340 // node is not supported by ArmNN
341 unsupportedOperators.insert(TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration));
342 continue;
343 }
344
345 nodesToDelegate->data[nodesToDelegate->size++] = nodeIndex;
346 }
347
348 for (std::set<int32_t>::iterator it=unsupportedOperators.begin(); it!=unsupportedOperators.end(); ++it)
349 {
350 TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext,
351 "Operator %s [%d] is not supported by armnn_opaque_delegate.",
352 tflite::EnumNameBuiltinOperator(tflite::BuiltinOperator(*it)),
353 *it);
354 }
355
356 if (!unsupportedOperators.empty() && m_Options.TfLiteRuntimeFallbackDisabled())
357 {
358 std::stringstream exMessage;
359 exMessage << "TfLiteArmnnOpaqueDelegate: There are unsupported operators in the model. ";
360 exMessage << "Not falling back to TfLite Runtime as fallback is disabled. ";
361 exMessage << "This should only be disabled under test conditions.";
362 throw armnn::Exception(exMessage.str());
363 }
364 if (nodesToDelegate->size == 0)
365 {
366 ARMNN_LOG(info) << "No operators in this model are supported by the Arm NN TfLite delegate." <<
367 " The model will be executed entirely by TfLite runtime.";
368 }
369
370 std::sort(&nodesToDelegate->data[0], &nodesToDelegate->data[nodesToDelegate->size]);
371 return nodesToDelegate;
372}
373
Ryan OSheaac9607f2023-04-03 11:33:33 +0100374TfLiteStatus ArmnnSubgraph::AddInputLayer(DelegateData& delegateData,
375 TfLiteOpaqueContext* tfLiteContext,
376 const TfLiteIntArray* inputs,
377 std::vector<armnn::BindingPointInfo>& inputBindings)
378{
379 const size_t numInputs = static_cast<size_t>(inputs->size);
380 for (unsigned int i = 0; i < numInputs; ++i)
381 {
382 const int32_t tensorId = inputs->data[i];
383 const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);
384
385 if(!tensor)
386 {
387 return kTfLiteError;
388 }
389
390 // Do not create bindings for constant inputs
391 if (TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo)
392 {
393 continue;
394 }
395
396 auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
397 armnn::IConnectableLayer* layer = delegateData.m_Network->AddInputLayer(bindingId);
398
399 auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
400 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
401 outputSlot.SetTensorInfo(tensorInfo);
402
403 // Store for creating connections
404 delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] = &outputSlot;
405
406 inputBindings.push_back(std::make_pair(bindingId, tensorInfo));
407 }
408
409 return kTfLiteOk;
410}
411
412TfLiteStatus ArmnnSubgraph::AddOutputLayer(DelegateData& delegateData,
413 TfLiteOpaqueContext* tfLiteContext,
414 const TfLiteIntArray* outputs,
415 std::vector<armnn::BindingPointInfo>& outputBindings)
416{
417 const size_t numOutputs = static_cast<size_t>(outputs->size);
418 for (unsigned int i = 0; i < numOutputs; ++i)
419 {
420 const int32_t tensorId = outputs->data[i];
421 const TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, tensorId);
422
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100423 if(!IsValid(tensor))
Ryan OSheaac9607f2023-04-03 11:33:33 +0100424 {
425 return kTfLiteError;
426 }
427
428 auto bindingId = static_cast<armnn::LayerBindingId>((tensorId));
429 armnn::IConnectableLayer* layer = delegateData.m_Network->AddOutputLayer(bindingId);
430
431 auto tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tensor);
Ryan OSheac229b3f2023-06-27 22:34:54 +0100432
433 if (delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)] == nullptr)
434 {
435 return kTfLiteError;
436 }
437
Ryan OSheaac9607f2023-04-03 11:33:33 +0100438 delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tensorId)]->Connect(layer->GetInputSlot(0));
439 outputBindings.push_back(std::make_pair(bindingId, tensorInfo));
440 }
441
442 return kTfLiteOk;
443}
444
445ArmnnSubgraph* ArmnnSubgraph::Create(TfLiteOpaqueContext* tfLiteContext,
446 const TfLiteOpaqueDelegateParams* parameters,
447 const ArmnnOpaqueDelegate* delegate)
448{
449 const auto startTime = armnn::GetTimeNow();
450 ARMNN_LOG(info) << "ArmnnSubgraph creation";
451
452 TfLiteIntArray* executionPlan;
453 if (TfLiteOpaqueContextGetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
454 {
455 return nullptr;
456 }
457
458 // Initialize DelegateData holds network and output slots information
459 DelegateData delegateData(delegate->m_Options.GetBackends());
460
461 // Build ArmNN Network
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000462 armnn::NetworkOptions networkOptions = delegate->m_Options.GetOptimizerOptions().GetModelOptions();
Ryan OSheaac9607f2023-04-03 11:33:33 +0100463 armnn::NetworkId networkId;
464 delegateData.m_Network = armnn::INetwork::Create(networkOptions);
465
466 delegateData.m_OutputSlotForNode = std::vector<armnn::IOutputSlot*>(
467 TfLiteOpaqueContextGetNumTensors(tfLiteContext), nullptr);
468
469 std::vector<armnn::BindingPointInfo> inputBindings;
470 std::vector<armnn::BindingPointInfo> outputBindings;
471
472 // Add input layer
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100473 if (AddInputLayer(delegateData, tfLiteContext, parameters->input_tensors, inputBindings) != kTfLiteOk)
Ryan OSheaac9607f2023-04-03 11:33:33 +0100474 {
475 throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Inputs to the network!");
476 }
477
478 // Parse TfLite delegate nodes to ArmNN
479 const auto parseStartTime = armnn::GetTimeNow();
480 for (int i = 0; i < parameters->nodes_to_replace->size; ++i)
481 {
482 const int nodeIndex = parameters->nodes_to_replace->data[i];
483
484 TfLiteOpaqueNode* tfLiteNode = nullptr;
485 TfLiteRegistrationExternal* tfLiteRegistration = nullptr;
486 if (TfLiteOpaqueContextGetNodeAndRegistration(
487 tfLiteContext, nodeIndex, &tfLiteNode, &tfLiteRegistration) != kTfLiteOk)
488 {
489 throw armnn::Exception(&"TfLiteArmnnOpaqueDelegate: Unable to get node registration: " [ nodeIndex]);
490 }
491
492 if (VisitNode(delegateData, tfLiteContext, tfLiteRegistration, tfLiteNode, nodeIndex) != kTfLiteOk)
493 {
494 throw armnn::Exception(&"TfLiteArmnnOpaqueDelegate: Unable to parse node: " [ nodeIndex]);
495 }
496 }
497 ARMNN_LOG(info) << "Parse nodes to ArmNN time: " << std::setprecision(2)
498 << std::fixed << armnn::GetTimeDuration(parseStartTime).count() << " ms";
499
500 // Add Output layer
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100501 if (AddOutputLayer(delegateData, tfLiteContext, parameters->output_tensors, outputBindings) != kTfLiteOk)
Ryan OSheaac9607f2023-04-03 11:33:33 +0100502 {
503 throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to add Outputs to the network!");
504 }
505
506 // Optimize ArmNN network
507 armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
508 try
509 {
510 const auto optimizeStartTime = armnn::GetTimeNow();
511 optNet = armnn::Optimize(*(delegateData.m_Network.get()),
512 delegate->m_Options.GetBackends(),
513 delegate->m_Runtime->GetDeviceSpec(),
514 delegate->m_Options.GetOptimizerOptions());
515 ARMNN_LOG(info) << "Optimize ArmnnSubgraph time: " << std::setprecision(2)
516 << std::fixed << armnn::GetTimeDuration(optimizeStartTime).count() << " ms";
517 }
518 catch (std::exception& ex)
519 {
520 std::stringstream exMessage;
521 exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from optimize.";
522 throw armnn::Exception(exMessage.str());
523 }
524 if (!optNet)
525 {
526 // Optimize failed
527 throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to optimize the network!");
528 }
529
530 // If set, we will serialize the optimized model into a dot file.
531 const std::string serializeToDotFile = delegate->m_Options.GetSerializeToDot();
532 if (!serializeToDotFile.empty())
533 {
534 ARMNN_LOG(info) << "Writing graph to dot file: " << serializeToDotFile;
535 fs::path filename = serializeToDotFile;
536 std::fstream file(filename.c_str(), std::ios_base::out);
537 optNet->SerializeToDot(file);
538 }
539
540 try
541 {
542 const auto loadStartTime = armnn::GetTimeNow();
543
544 // Load graph into runtime
545 std::string errorMessage;
546 armnn::Status loadingStatus;
547 armnn::MemorySource inputSource = armnn::MemorySource::Undefined;
548 armnn::MemorySource outputSource = armnn::MemorySource::Undefined;
549 // There's a bit of an assumption here that the delegate will only support Malloc memory source.
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000550 if (delegate->m_Options.GetOptimizerOptions().GetImportEnabled())
Ryan OSheaac9607f2023-04-03 11:33:33 +0100551 {
552 inputSource = armnn::MemorySource::Malloc;
553 }
John Mcloughlinc5ee0d72023-03-24 12:07:25 +0000554 if (delegate->m_Options.GetOptimizerOptions().GetExportEnabled())
Ryan OSheaac9607f2023-04-03 11:33:33 +0100555 {
556 outputSource = armnn::MemorySource::Malloc;
557 }
558 armnn::INetworkProperties networkProperties(false,
559 inputSource,
560 outputSource,
561 delegate->m_Options.GetInternalProfilingState(),
562 delegate->m_Options.GetInternalProfilingDetail());
563 loadingStatus = delegate->m_Runtime->LoadNetwork(networkId,
564 std::move(optNet),
565 errorMessage,
566 networkProperties);
567 if (loadingStatus != armnn::Status::Success)
568 {
569 // Network load failed.
570 throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Network could not be loaded: " + errorMessage);
571 }
572
573 ARMNN_LOG(info) << "Load ArmnnSubgraph time: " << std::setprecision(2)
574 << std::fixed << armnn::GetTimeDuration(loadStartTime).count() << " ms";
575 }
576 catch (std::exception& ex)
577 {
578 std::stringstream exMessage;
579 exMessage << "TfLiteArmnnOpaqueDelegate: Exception (" << ex.what() << ") caught from LoadNetwork.";
580 throw armnn::Exception(exMessage.str());
581 }
582
583 // Register debug callback function
584 if (delegate->m_Options.GetDebugCallbackFunction().has_value())
585 {
586 delegate->m_Runtime->RegisterDebugCallback(networkId, delegate->m_Options.GetDebugCallbackFunction().value());
587 }
588
589 ARMNN_LOG(info) << "Overall ArmnnSubgraph creation time: " << std::setprecision(2)
590 << std::fixed << armnn::GetTimeDuration(startTime).count() << " ms\n";
591
592 // Create a new SubGraph with networkId and runtime
593 return new ArmnnSubgraph(networkId, delegate->m_Runtime, inputBindings, outputBindings);
594}
595
596TfLiteStatus ArmnnSubgraph::Prepare(TfLiteOpaqueContext* tfLiteContext)
597{
598 armnn::IgnoreUnused(tfLiteContext);
599 return kTfLiteOk;
600}
601
602TfLiteStatus ArmnnSubgraph::Invoke(TfLiteOpaqueContext* tfLiteContext, TfLiteOpaqueNode* tfLiteNode)
603{
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100604 // Get array of input indices, inputIndexArray is set from the TfLiteOpaqueNodeInputs function
605 // This function turns inputIndexArray into an int array of indices. These indices point to the tensors for
606 // each input slot in the node.
607 const int* inputIndexArray;
Ryan OSheaac9607f2023-04-03 11:33:33 +0100608 int numInputs;
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100609 if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
Ryan OSheaac9607f2023-04-03 11:33:33 +0100610 {
611 throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph inputs!");
612 }
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100613 // Prepare inputs
614 armnn::InputTensors inputTensors;
615 size_t inputIndex = 0;
Ryan OSheaac9607f2023-04-03 11:33:33 +0100616 for (int inputIdx = 0; inputIdx < numInputs; inputIdx++)
617 {
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100618 TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputIndexArray[inputIdx]);
Ryan OSheaac9607f2023-04-03 11:33:33 +0100619
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100620 if(!IsValid(tensor))
Ryan OSheaac9607f2023-04-03 11:33:33 +0100621 {
622 return kTfLiteError;
623 }
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100624 // If tensor is not read only
Ryan OSheaac9607f2023-04-03 11:33:33 +0100625 if (TfLiteOpaqueTensorGetAllocationType(tensor) != kTfLiteMmapRo)
626 {
627 const armnn::BindingPointInfo& inputBinding = m_InputBindings[inputIndex];
628 armnn::TensorInfo inputTensorInfo = inputBinding.second;
629 inputTensorInfo.SetConstant(true);
630 const armnn::ConstTensor inputTensor(inputTensorInfo, TfLiteOpaqueTensorData(tensor));
Narumol Prangnawarat46e574e2023-05-05 16:39:05 +0100631 inputTensors.emplace_back(inputIndexArray[inputIdx], inputTensor);
Ryan OSheaac9607f2023-04-03 11:33:33 +0100632
633 ++inputIndex;
634 }
635 }
636
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100637 // Get array of output indices, outputIndexArray is set from the TfLiteOpaqueNodeOutputs function
638 // This function turns outputIndexArray into an int array of indices. These indices point to the tensors for
639 // each output slot in the node.
640 const int* outputIndexArray;
Ryan OSheaac9607f2023-04-03 11:33:33 +0100641 int numOutputs;
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100642 if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
Ryan OSheaac9607f2023-04-03 11:33:33 +0100643 {
644 throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to load subgraph outputs!");
645 }
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100646 // Assign the tensors from the outputIndexArray to the armnn BindingPointInfo
647 armnn::OutputTensors outputTensors;
Ryan OSheaac9607f2023-04-03 11:33:33 +0100648 for (int outputIdx = 0; outputIdx < numOutputs; outputIdx++)
649 {
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100650 const armnn::BindingPointInfo& outputBinding = m_OutputBindings[outputIdx];
651 TfLiteOpaqueTensor* tensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputIndexArray[outputIdx]);
652 if(!IsValid(tensor))
Ryan OSheaac9607f2023-04-03 11:33:33 +0100653 {
654 return kTfLiteError;
655 }
656
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100657 const armnn::Tensor outputTensor(outputBinding.second, reinterpret_cast<TfLiteTensor*>(tensor)->data
658 .data);
659 outputTensors.emplace_back(outputIndexArray[outputIdx], outputTensor);
Ryan OSheaac9607f2023-04-03 11:33:33 +0100660 }
661
662 // Run graph
David Monahan727d0172023-10-04 10:16:24 +0100663 try
Ryan OSheaac9607f2023-04-03 11:33:33 +0100664 {
David Monahan727d0172023-10-04 10:16:24 +0100665 auto status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
666 // The delegate holds its own Arm NN runtime so this is our last chance to print internal profiling data.
667 std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
668 if (profiler && profiler->IsProfilingEnabled())
669 {
670 profiler->Print(std::cout);
671 }
672 return (status == armnn::Status::Success) ? kTfLiteOk : kTfLiteError;
Ryan OSheaac9607f2023-04-03 11:33:33 +0100673 }
David Monahan727d0172023-10-04 10:16:24 +0100674 catch (armnn::InvalidArgumentException& ex)
675 {
676 ARMNN_LOG(error) << "ArmNN Failed to EnqueueWorkload with error: " << ex.what();
677 // This should really be kTfLiteDelegateError but the Delegate Test Suite expects kTfLiteError so we return
678 // that instead
679 return kTfLiteError;
680 }
681
Ryan OSheaac9607f2023-04-03 11:33:33 +0100682}
683
684TfLiteStatus ArmnnSubgraph::VisitNode(DelegateData& delegateData,
685 TfLiteOpaqueContext* tfLiteContext,
686 TfLiteRegistrationExternal* tfLiteRegistration,
687 TfLiteOpaqueNode* tfLiteNode,
688 int nodeIndex)
689{
690 switch (TfLiteRegistrationExternalGetBuiltInCode(tfLiteRegistration))
691 {
Teresa Charlinf69ae562023-04-27 14:42:23 +0100692 case kTfLiteBuiltinAbs:
693 return VisitElementwiseUnaryOperator(delegateData,
694 tfLiteContext,
695 tfLiteNode,
696 nodeIndex,
697 kTfLiteBuiltinAbs,
698 armnn::UnaryOperation::Abs);
David Monahan6c53f9f2023-04-27 15:21:19 +0100699 case kTfLiteBuiltinAdd:
700 return VisitElementwiseBinaryOperator(delegateData,
701 tfLiteContext,
702 tfLiteNode,
703 nodeIndex,
704 kTfLiteBuiltinAdd);
John Mcloughlin559d9092023-04-26 20:14:47 +0100705 case kTfLiteBuiltinArgMax:
706 return VisitArgMinMaxOperator(delegateData,
707 tfLiteContext,
708 tfLiteNode,
709 nodeIndex,
710 kTfLiteBuiltinArgMax);
711 case kTfLiteBuiltinArgMin:
712 return VisitArgMinMaxOperator(delegateData,
713 tfLiteContext,
714 tfLiteNode,
715 nodeIndex,
716 kTfLiteBuiltinArgMin);
Matthew Sloyan48ec8132023-04-27 17:04:47 +0100717 case kTfLiteBuiltinAveragePool2d:
718 return VisitPooling2dOperator(delegateData,
719 tfLiteContext,
720 tfLiteNode,
721 nodeIndex,
722 kTfLiteBuiltinAveragePool2d);
John Mcloughlin0422cf22023-04-27 16:55:00 +0100723 case kTfLiteBuiltinBatchMatmul:
724 return VisitBatchMatMulOperator(delegateData,
725 tfLiteContext,
726 tfLiteNode,
727 nodeIndex,
728 kTfLiteBuiltinBatchMatmul);
Idriss Chaouchcbf79292023-09-08 11:18:16 +0100729 case kTfLiteBuiltinBroadcastTo:
730 return VisitBroadcastToOperator(delegateData,
731 tfLiteContext,
732 tfLiteNode,
733 nodeIndex,
734 kTfLiteBuiltinBroadcastTo);
Kevin May81b66f32023-04-26 14:55:36 +0100735 case kTfLiteBuiltinBatchToSpaceNd:
736 return VisitBatchToSpaceNdOperator(delegateData,
737 tfLiteContext,
738 tfLiteNode,
739 nodeIndex,
740 kTfLiteBuiltinBatchToSpaceNd);
Ryan OSheaa37ccb02023-04-11 10:54:07 +0100741 case kTfLiteBuiltinCast:
742 return VisitCastOperator(delegateData,
743 tfLiteContext,
744 tfLiteNode,
745 nodeIndex,
746 kTfLiteBuiltinCast);
Teresa Charlinf69ae562023-04-27 14:42:23 +0100747 case kTfLiteBuiltinCeil:
748 return VisitElementwiseUnaryOperator(delegateData,
749 tfLiteContext,
750 tfLiteNode,
751 nodeIndex,
752 kTfLiteBuiltinCeil,
753 armnn::UnaryOperation::Ceil);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +0100754 case kTfLiteBuiltinConcatenation:
755 return VisitControlOperator(delegateData,
756 tfLiteContext,
757 tfLiteNode,
758 nodeIndex,
759 kTfLiteBuiltinConcatenation);
Matthew Sloyan080ffd82023-04-24 12:53:04 +0100760 case kTfLiteBuiltinConv2d:
761 return VisitConvolutionOperator(delegateData,
762 tfLiteContext,
763 tfLiteNode,
764 nodeIndex,
765 kTfLiteBuiltinConv2d);
Francis Murtagh3a9e7ba2023-04-26 15:58:39 +0100766 case kTfLiteBuiltinConv3d:
767 return VisitConvolutionOperator(delegateData,
768 tfLiteContext,
769 tfLiteNode,
770 nodeIndex,
771 kTfLiteBuiltinConv3d);
Matthew Sloyan48ec8132023-04-27 17:04:47 +0100772 case kTfLiteBuiltinCustom:
773 {
774 // Custom operators are defined by the name rather than the builtin code.
775 // Parse the custom_name param in the registration to point to the correct visitor function.
776 std::string customOperatorName = TfLiteRegistrationExternalGetCustomName(tfLiteRegistration);
777 if ( customOperatorName == "AveragePool3D" )
778 {
779 return VisitPooling3dOperator(delegateData,
780 tfLiteContext,
781 tfLiteNode,
782 nodeIndex,
783 customOperatorName);
784 }
785 else if (customOperatorName == "MaxPool3D")
786 {
787 return VisitPooling3dOperator(delegateData,
788 tfLiteContext,
789 tfLiteNode,
790 nodeIndex,
791 customOperatorName);
792 }
793 // Invalid or unsupported custom operator
794 return kTfLiteError;
795 }
Matthew Sloyan080ffd82023-04-24 12:53:04 +0100796 case kTfLiteBuiltinDepthwiseConv2d:
797 return VisitConvolutionOperator(delegateData,
798 tfLiteContext,
799 tfLiteNode,
800 nodeIndex,
801 kTfLiteBuiltinDepthwiseConv2d);
Francis Murtagh36d94ef2023-04-28 14:05:43 +0100802 case kTfLiteBuiltinDequantize:
803 return VisitDequantizeOperator(delegateData,
804 tfLiteContext,
805 tfLiteNode,
806 nodeIndex,
807 kTfLiteBuiltinDequantize);
David Monahan6c53f9f2023-04-27 15:21:19 +0100808 case kTfLiteBuiltinDiv:
809 return VisitElementwiseBinaryOperator(delegateData,
810 tfLiteContext,
811 tfLiteNode,
812 nodeIndex,
813 kTfLiteBuiltinDiv);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +0100814 case kTfLiteBuiltinEqual:
815 return VisitComparisonOperator(delegateData,
816 tfLiteContext,
817 tfLiteNode,
818 nodeIndex,
Teresa Charlinf69ae562023-04-27 14:42:23 +0100819 kTfLiteBuiltinEqual,
820 armnn::ComparisonOperation::Equal);
Teresa Charlin42362962023-04-28 14:23:33 +0100821 case kTfLiteBuiltinDepthToSpace:
822 return VisitDepthToSpaceOperator(delegateData,
823 tfLiteContext,
824 tfLiteNode,
825 nodeIndex,
826 kTfLiteBuiltinDepthToSpace);
827 case kTfLiteBuiltinElu:
828 return VisitActivationOperator(delegateData,
829 tfLiteContext,
830 tfLiteNode,
831 nodeIndex,
832 kTfLiteBuiltinElu);
Teresa Charlinf69ae562023-04-27 14:42:23 +0100833 case kTfLiteBuiltinExp:
834 return VisitElementwiseUnaryOperator(delegateData,
835 tfLiteContext,
836 tfLiteNode,
837 nodeIndex,
838 kTfLiteBuiltinExp,
839 armnn::UnaryOperation::Exp);
Matthew Sloyan3504e422023-05-03 13:53:02 +0100840 case kTfLiteBuiltinExpandDims:
841 return VisitExpandDimsOperator(delegateData,
842 tfLiteContext,
843 tfLiteNode,
844 nodeIndex,
845 kTfLiteBuiltinExpandDims);
Ryan OShea59f8f652023-05-11 20:37:53 +0100846 case kTfLiteBuiltinFill:
847 return VisitFillOperator(delegateData,
848 tfLiteContext,
849 tfLiteNode,
850 nodeIndex,
851 kTfLiteBuiltinFill);
Matthew Sloyan48ec8132023-04-27 17:04:47 +0100852 case kTfLiteBuiltinFloor:
853 return VisitFloorOperator(delegateData,
854 tfLiteContext,
855 tfLiteNode,
856 nodeIndex,
857 kTfLiteBuiltinFloor);
David Monahan6c53f9f2023-04-27 15:21:19 +0100858 case kTfLiteBuiltinFloorDiv:
859 return VisitElementwiseBinaryOperator(delegateData,
860 tfLiteContext,
861 tfLiteNode,
862 nodeIndex,
863 kTfLiteBuiltinFloorDiv);
Matthew Sloyan0bd4c622023-04-27 11:48:26 +0100864 case kTfLiteBuiltinFullyConnected:
865 return VisitFullyConnectedOperator(delegateData,
866 tfLiteContext,
867 tfLiteNode,
868 nodeIndex,
869 kTfLiteBuiltinFullyConnected);
Kevin Mayb2831c52023-04-26 17:27:24 +0100870 case kTfLiteBuiltinGather:
871 return VisitGatherOperator(delegateData,
872 tfLiteContext,
873 tfLiteNode,
874 nodeIndex,
875 kTfLiteBuiltinGather);
876 case kTfLiteBuiltinGatherNd:
877 return VisitGatherNdOperator(delegateData,
878 tfLiteContext,
879 tfLiteNode,
880 nodeIndex,
881 kTfLiteBuiltinGatherNd);
Teresa Charlin077cddb2023-09-15 15:19:21 +0100882 case kTfLiteBuiltinGelu:
883 return VisitActivationOperator(delegateData,
884 tfLiteContext,
885 tfLiteNode,
886 nodeIndex,
887 kTfLiteBuiltinGelu);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +0100888 case kTfLiteBuiltinGreater:
889 return VisitComparisonOperator(delegateData,
890 tfLiteContext,
891 tfLiteNode,
892 nodeIndex,
Teresa Charlinf69ae562023-04-27 14:42:23 +0100893 kTfLiteBuiltinGreater,
894 armnn::ComparisonOperation::Greater);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +0100895 case kTfLiteBuiltinGreaterEqual:
896 return VisitComparisonOperator(delegateData,
897 tfLiteContext,
898 tfLiteNode,
899 nodeIndex,
Teresa Charlinf69ae562023-04-27 14:42:23 +0100900 kTfLiteBuiltinGreaterEqual,
901 armnn::ComparisonOperation::GreaterOrEqual);
Matthew Sloyan0bd4c622023-04-27 11:48:26 +0100902 case kTfLiteBuiltinHardSwish:
903 return VisitActivationOperator(delegateData,
904 tfLiteContext,
905 tfLiteNode,
906 nodeIndex,
907 kTfLiteBuiltinHardSwish);
Teresa Charlinf69ae562023-04-27 14:42:23 +0100908 case kTfLiteBuiltinL2Normalization:
909 return VisitL2NormalizationOperator(delegateData,
910 tfLiteContext,
911 tfLiteNode,
912 nodeIndex,
913 kTfLiteBuiltinL2Normalization);
Matthew Sloyan48ec8132023-04-27 17:04:47 +0100914 case kTfLiteBuiltinL2Pool2d:
915 return VisitPooling2dOperator(delegateData,
916 tfLiteContext,
917 tfLiteNode,
918 nodeIndex,
919 kTfLiteBuiltinL2Pool2d);
Tianle Chengae931732023-07-28 11:53:04 +0100920 case kTfLiteBuiltinLeakyRelu:
921 return VisitActivationOperator(delegateData,
922 tfLiteContext,
923 tfLiteNode,
924 nodeIndex,
925 kTfLiteBuiltinLeakyRelu);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +0100926 case kTfLiteBuiltinLess:
927 return VisitComparisonOperator(delegateData,
928 tfLiteContext,
929 tfLiteNode,
930 nodeIndex,
Teresa Charlinf69ae562023-04-27 14:42:23 +0100931 kTfLiteBuiltinLess,
932 armnn::ComparisonOperation::Less);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +0100933 case kTfLiteBuiltinLessEqual:
934 return VisitComparisonOperator(delegateData,
935 tfLiteContext,
936 tfLiteNode,
937 nodeIndex,
Teresa Charlinf69ae562023-04-27 14:42:23 +0100938 kTfLiteBuiltinLessEqual,
939 armnn::ComparisonOperation::LessOrEqual);
Matthew Sloyan0bd4c622023-04-27 11:48:26 +0100940 case kTfLiteBuiltinLogistic:
941 return VisitActivationOperator(delegateData,
942 tfLiteContext,
943 tfLiteNode,
944 nodeIndex,
945 kTfLiteBuiltinLogistic);
Teresa Charlinf69ae562023-04-27 14:42:23 +0100946 case kTfLiteBuiltinLocalResponseNormalization:
947 return VisitLocalResponseNormalizationOperator(delegateData,
948 tfLiteContext,
949 tfLiteNode,
950 nodeIndex,
951 kTfLiteBuiltinLocalResponseNormalization);
952 case kTfLiteBuiltinLog:
953 return VisitElementwiseUnaryOperator(delegateData,
954 tfLiteContext,
955 tfLiteNode,
956 nodeIndex,
957 kTfLiteBuiltinLog,
958 armnn::UnaryOperation::Log);
959 case kTfLiteBuiltinLogicalAnd:
960 return VisitLogicalBinaryOperator(delegateData,
961 tfLiteContext,
962 tfLiteNode,
963 nodeIndex,
964 kTfLiteBuiltinLogicalAnd,
965 armnn::LogicalBinaryOperation::LogicalAnd);
966 case kTfLiteBuiltinLogicalNot:
967 return VisitElementwiseUnaryOperator(delegateData,
968 tfLiteContext,
969 tfLiteNode,
970 nodeIndex,
971 kTfLiteBuiltinLogicalNot,
972 armnn::UnaryOperation::LogicalNot);
973 case kTfLiteBuiltinLogicalOr:
974 return VisitLogicalBinaryOperator(delegateData,
975 tfLiteContext,
976 tfLiteNode,
977 nodeIndex,
978 kTfLiteBuiltinLogicalOr,
979 armnn::LogicalBinaryOperation::LogicalOr);
Teresa Charlin42362962023-04-28 14:23:33 +0100980 case kTfLiteBuiltinLogSoftmax:
981 return VisitSoftmaxOperator(delegateData,
982 tfLiteContext,
983 tfLiteNode,
984 nodeIndex,
985 kTfLiteBuiltinLogSoftmax);
Matthew Sloyan48ec8132023-04-27 17:04:47 +0100986 case kTfLiteBuiltinLstm:
987 return VisitLstmOperator(delegateData,
988 tfLiteContext,
989 tfLiteNode,
990 nodeIndex,
991 kTfLiteBuiltinLstm);
992 case kTfLiteBuiltinMaxPool2d:
993 return VisitPooling2dOperator(delegateData,
994 tfLiteContext,
995 tfLiteNode,
996 nodeIndex,
997 kTfLiteBuiltinMaxPool2d);
David Monahan6c53f9f2023-04-27 15:21:19 +0100998 case kTfLiteBuiltinMaximum:
999 return VisitElementwiseBinaryOperator(delegateData,
1000 tfLiteContext,
1001 tfLiteNode,
1002 nodeIndex,
1003 kTfLiteBuiltinMaximum);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +01001004 case kTfLiteBuiltinMean:
1005 return VisitControlOperator(delegateData,
1006 tfLiteContext,
1007 tfLiteNode,
1008 nodeIndex,
1009 kTfLiteBuiltinMean);
David Monahan6c53f9f2023-04-27 15:21:19 +01001010 case kTfLiteBuiltinMinimum:
1011 return VisitElementwiseBinaryOperator(delegateData,
1012 tfLiteContext,
1013 tfLiteNode,
1014 nodeIndex,
1015 kTfLiteBuiltinMinimum);
Ryan OShea59f8f652023-05-11 20:37:53 +01001016 case kTfLiteBuiltinMirrorPad:
1017 return VisitPadOperator(delegateData,
1018 tfLiteContext,
1019 tfLiteNode,
1020 nodeIndex,
1021 kTfLiteBuiltinMirrorPad);
David Monahan6c53f9f2023-04-27 15:21:19 +01001022 case kTfLiteBuiltinMul:
1023 return VisitElementwiseBinaryOperator(delegateData,
1024 tfLiteContext,
1025 tfLiteNode,
1026 nodeIndex,
1027 kTfLiteBuiltinMul);
Teresa Charlinf69ae562023-04-27 14:42:23 +01001028 case kTfLiteBuiltinNeg:
1029 return VisitElementwiseUnaryOperator(delegateData,
1030 tfLiteContext,
1031 tfLiteNode,
1032 nodeIndex,
1033 kTfLiteBuiltinNeg,
1034 armnn::UnaryOperation::Neg);
Matthew Sloyan2b04ec32023-04-26 11:42:46 +01001035 case kTfLiteBuiltinNotEqual:
1036 return VisitComparisonOperator(delegateData,
1037 tfLiteContext,
1038 tfLiteNode,
1039 nodeIndex,
Teresa Charlinf69ae562023-04-27 14:42:23 +01001040 kTfLiteBuiltinNotEqual,
1041 armnn::ComparisonOperation::NotEqual);
Teresa Charlinecebb0f2023-04-27 21:37:56 +01001042 case kTfLiteBuiltinPack:
1043 return VisitPackOperator(delegateData,
1044 tfLiteContext,
1045 tfLiteNode,
1046 nodeIndex,
1047 kTfLiteBuiltinPack);
1048 case kTfLiteBuiltinPad:
1049 return VisitPadOperator(delegateData,
1050 tfLiteContext,
1051 tfLiteNode,
1052 nodeIndex,
1053 kTfLiteBuiltinPad);
1054 case kTfLiteBuiltinPadv2:
1055 return VisitPadOperator(delegateData,
1056 tfLiteContext,
1057 tfLiteNode,
1058 nodeIndex,
1059 kTfLiteBuiltinPadv2);
John Mcloughlin0ec00872023-05-15 17:03:49 +01001060 case kTfLiteBuiltinPow:
1061 return VisitElementwiseBinaryOperator(delegateData,
1062 tfLiteContext,
1063 tfLiteNode,
1064 nodeIndex,
1065 kTfLiteBuiltinPow);
Matthew Sloyan0bd4c622023-04-27 11:48:26 +01001066 case kTfLiteBuiltinPrelu:
1067 return VisitPreluOperator(delegateData,
1068 tfLiteContext,
1069 tfLiteNode,
1070 nodeIndex,
1071 kTfLiteBuiltinPrelu);
Francis Murtagh36d94ef2023-04-28 14:05:43 +01001072 case kTfLiteBuiltinQuantize:
1073 return VisitQuantizeOperator(delegateData,
1074 tfLiteContext,
1075 tfLiteNode,
1076 nodeIndex,
1077 kTfLiteBuiltinQuantize);
John Mcloughlin083586d2023-04-28 18:36:52 +01001078 case kTfLiteBuiltinReduceMax:
1079 return VisitReduceOperator(delegateData,
1080 tfLiteContext,
1081 tfLiteNode,
1082 nodeIndex,
1083 kTfLiteBuiltinReduceMax);
1084 case kTfLiteBuiltinReduceMin:
1085 return VisitReduceOperator(delegateData,
1086 tfLiteContext,
1087 tfLiteNode,
1088 nodeIndex,
1089 kTfLiteBuiltinReduceMin);
1090 case kTfLiteBuiltinReduceProd:
1091 return VisitReduceOperator(delegateData,
1092 tfLiteContext,
1093 tfLiteNode,
1094 nodeIndex,
1095 kTfLiteBuiltinReduceProd);
Matthew Sloyan0bd4c622023-04-27 11:48:26 +01001096 case kTfLiteBuiltinRelu:
1097 return VisitActivationOperator(delegateData,
1098 tfLiteContext,
1099 tfLiteNode,
1100 nodeIndex,
1101 kTfLiteBuiltinRelu);
1102 case kTfLiteBuiltinReluN1To1:
1103 return VisitActivationOperator(delegateData,
1104 tfLiteContext,
1105 tfLiteNode,
1106 nodeIndex,
1107 kTfLiteBuiltinReluN1To1);
1108 case kTfLiteBuiltinRelu6:
1109 return VisitActivationOperator(delegateData,
1110 tfLiteContext,
1111 tfLiteNode,
1112 nodeIndex,
1113 kTfLiteBuiltinRelu6);
Matthew Sloyanc49aacc2023-04-28 17:27:26 +01001114 case kTfLiteBuiltinReshape:
1115 return VisitReshapeOperator(delegateData,
1116 tfLiteContext,
1117 tfLiteNode,
1118 nodeIndex,
1119 kTfLiteBuiltinReshape);
John Mcloughlin083586d2023-04-28 18:36:52 +01001120 case kTfLiteBuiltinResizeNearestNeighbor:
1121 return VisitResizeOperator(delegateData,
1122 tfLiteContext,
1123 tfLiteNode,
1124 nodeIndex,
1125 kTfLiteBuiltinResizeNearestNeighbor);
1126 case kTfLiteBuiltinResizeBilinear:
1127 return VisitResizeOperator(delegateData,
1128 tfLiteContext,
1129 tfLiteNode,
1130 nodeIndex,
1131 kTfLiteBuiltinResizeBilinear);
Tracy Narine7306bbe2023-07-17 16:06:26 +01001132 case kTfLiteBuiltinReverseV2:
1133 return VisitReverseV2Operator(delegateData,
1134 tfLiteContext,
1135 tfLiteNode,
1136 nodeIndex,
1137 kTfLiteBuiltinReverseV2);
Teresa Charlinf69ae562023-04-27 14:42:23 +01001138 case kTfLiteBuiltinRsqrt:
1139 return VisitElementwiseUnaryOperator(delegateData,
1140 tfLiteContext,
1141 tfLiteNode,
1142 nodeIndex,
1143 kTfLiteBuiltinRsqrt,
1144 armnn::UnaryOperation::Rsqrt);
John Mcloughlin0422cf22023-04-27 16:55:00 +01001145 case kTfLiteBuiltinShape:
1146 return VisitShapeOperator(delegateData,
1147 tfLiteContext,
1148 tfLiteNode,
1149 nodeIndex,
1150 kTfLiteBuiltinShape);
Teresa Charlinf69ae562023-04-27 14:42:23 +01001151 case kTfLiteBuiltinSin:
1152 return VisitElementwiseUnaryOperator(delegateData,
1153 tfLiteContext,
1154 tfLiteNode,
1155 nodeIndex,
1156 kTfLiteBuiltinSin,
1157 armnn::UnaryOperation::Sin);
Teresa Charlin86b03572023-04-28 13:19:12 +01001158 case kTfLiteBuiltinSlice:
1159 return VisitSliceOperator(delegateData,
1160 tfLiteContext,
1161 tfLiteNode,
1162 nodeIndex,
1163 kTfLiteBuiltinSlice);
Teresa Charlin42362962023-04-28 14:23:33 +01001164 case kTfLiteBuiltinSoftmax:
1165 return VisitSoftmaxOperator(delegateData,
1166 tfLiteContext,
1167 tfLiteNode,
1168 nodeIndex,
1169 kTfLiteBuiltinSoftmax);
Kevin May81b66f32023-04-26 14:55:36 +01001170 case kTfLiteBuiltinSpaceToBatchNd:
1171 return VisitSpaceToBatchNdOperator(delegateData,
1172 tfLiteContext,
1173 tfLiteNode,
1174 nodeIndex,
1175 kTfLiteBuiltinSpaceToBatchNd);
Teresa Charlin42362962023-04-28 14:23:33 +01001176 case kTfLiteBuiltinSpaceToDepth:
1177 return VisitSpaceToDepthOperator(delegateData,
1178 tfLiteContext,
1179 tfLiteNode,
1180 nodeIndex,
1181 kTfLiteBuiltinSpaceToDepth);
David Monahanc833cef2023-05-03 15:53:03 +01001182 case kTfLiteBuiltinSplit:
1183 return VisitSplitOperator(delegateData,
1184 tfLiteContext,
1185 tfLiteNode,
1186 nodeIndex,
1187 kTfLiteBuiltinSplit);
1188 case kTfLiteBuiltinSplitV:
1189 return VisitSplitVOperator(delegateData,
1190 tfLiteContext,
1191 tfLiteNode,
1192 nodeIndex,
1193 kTfLiteBuiltinSplitV);
John Mcloughlin0ec00872023-05-15 17:03:49 +01001194 case kTfLiteBuiltinSquaredDifference:
1195 return VisitElementwiseBinaryOperator(delegateData,
1196 tfLiteContext,
1197 tfLiteNode,
1198 nodeIndex,
1199 kTfLiteBuiltinSquaredDifference);
David Monahan6c53f9f2023-04-27 15:21:19 +01001200 case kTfLiteBuiltinSub:
1201 return VisitElementwiseBinaryOperator(delegateData,
1202 tfLiteContext,
1203 tfLiteNode,
1204 nodeIndex,
1205 kTfLiteBuiltinSub);
Teresa Charlinf69ae562023-04-27 14:42:23 +01001206 case kTfLiteBuiltinSqrt:
1207 return VisitElementwiseUnaryOperator(delegateData,
1208 tfLiteContext,
1209 tfLiteNode,
1210 nodeIndex,
1211 kTfLiteBuiltinSqrt,
1212 armnn::UnaryOperation::Sqrt);
Matthew Sloyan3504e422023-05-03 13:53:02 +01001213 case kTfLiteBuiltinSqueeze:
1214 return VisitSqueezeOperator(delegateData,
1215 tfLiteContext,
1216 tfLiteNode,
1217 nodeIndex,
1218 kTfLiteBuiltinSqueeze);
Teresa Charlin86b03572023-04-28 13:19:12 +01001219 case kTfLiteBuiltinStridedSlice:
1220 return VisitStridedSliceOperator(delegateData,
1221 tfLiteContext,
1222 tfLiteNode,
1223 nodeIndex,
1224 kTfLiteBuiltinStridedSlice);
John Mcloughlin083586d2023-04-28 18:36:52 +01001225 case kTfLiteBuiltinSum:
1226 return VisitReduceOperator(delegateData,
1227 tfLiteContext,
1228 tfLiteNode,
1229 nodeIndex,
1230 kTfLiteBuiltinSum);
Matthew Sloyan0bd4c622023-04-27 11:48:26 +01001231 case kTfLiteBuiltinTanh:
1232 return VisitActivationOperator(delegateData,
1233 tfLiteContext,
1234 tfLiteNode,
1235 nodeIndex,
1236 kTfLiteBuiltinTanh);
Tianle Cheng92ce35c2023-07-25 16:41:00 +01001237 case kTfLiteBuiltinTile:
1238 return VisitTileOperator(delegateData,
1239 tfLiteContext,
1240 tfLiteNode,
1241 nodeIndex,
1242 kTfLiteBuiltinTile);
Teresa Charlin42362962023-04-28 14:23:33 +01001243 case kTfLiteBuiltinTranspose:
1244 return VisitTransposeOperator(delegateData,
Tianle Cheng92ce35c2023-07-25 16:41:00 +01001245 tfLiteContext,
1246 tfLiteNode,
1247 nodeIndex,
1248 kTfLiteBuiltinTranspose);
Francis Murtagh3a9e7ba2023-04-26 15:58:39 +01001249 case kTfLiteBuiltinTransposeConv:
1250 return VisitConvolutionOperator(delegateData,
1251 tfLiteContext,
1252 tfLiteNode,
1253 nodeIndex,
1254 kTfLiteBuiltinTransposeConv);
Matthew Sloyan74be13e2023-05-03 17:34:00 +01001255 case kTfLiteBuiltinUnidirectionalSequenceLstm:
1256 return VisitUnidirectionalSequenceLstmOperator(delegateData,
1257 tfLiteContext,
1258 tfLiteNode,
1259 nodeIndex,
1260 kTfLiteBuiltinUnidirectionalSequenceLstm);
Teresa Charlinecebb0f2023-04-27 21:37:56 +01001261 case kTfLiteBuiltinUnpack:
1262 return VisitUnpackOperator(delegateData,
1263 tfLiteContext,
1264 tfLiteNode,
1265 nodeIndex,
1266 kTfLiteBuiltinUnpack);
Ryan OSheaac9607f2023-04-03 11:33:33 +01001267 default:
1268 return kTfLiteError;
1269 }
1270}
Francis Murtagh3a9e7ba2023-04-26 15:58:39 +01001271} // armnnOpaqueDelegate namespace