//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>
#include <DelegateUtils.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/c/c_api_opaque.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>

namespace
{

// Macro to call an Is<layer_name>Supported function and log the caller name together with the reason for lack of support
#define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", \
                                      opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
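
// Parameters:
//   opName     - operator name used in the log messages
//   func       - LayerSupportHandle member function to call, e.g. IsActivationSupported
//   backends   - ordered list of armnn::BackendId candidates to try
//   supported  - bool out-variable, left true when a backend reports support
//   setBackend - armnn::BackendId out-variable, set to the first backend that reports support
//   ...        - arguments forwarded to func (input/output TensorInfos and, where needed, a descriptor)
//
// Typical usage (illustrative sketch only; inputInfo/outputInfo/descriptor are placeholder names):
//
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
//                                       tfLiteContext,
//                                       IsReshapeSupported,
//                                       data.m_Backends,
//                                       isSupported,
//                                       setBackend,
//                                       inputInfo,
//                                       outputInfo,
//                                       descriptor);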

TfLiteStatus ValidateNumInputs(TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    int numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of inputs (%d != %d) in node #%d",
                numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = TfLiteOpaqueNodeNumberOfOutputs(tfLiteNode);
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (%d != %d) in node #%d",
                numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

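// A tensor is constant when its data lives in read-only, memory-mapped storage (kTfLiteMmapRo),
// i.e. it comes from the model file rather than being produced at runtime.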
bool IsConstantTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    auto tensorAllocationType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    return tensorAllocationType == kTfLiteMmapRo;
}

bool IsDynamicTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    auto tensorAllocationType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    return tensorAllocationType == kTfLiteDynamic;
}

bool IsValid(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteOpaqueContext* tfLiteContext,
             const TfLiteOpaqueTensor* tfLiteTensor,
             int32_t operatorCode,
             int32_t nodeIndex)
{
    if (!IsValid(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Invalid TfLite tensor in operator #%d node #%d",
                operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic tensors are not supported in operator #%d node #%d",
                operatorCode, nodeIndex);
        return false;
    }
    return true;
}

bool IsAffineQuantization(const TfLiteOpaqueTensor& tfLiteTensor)
{
    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(&tfLiteTensor);
    return quantizationInfo.type == kTfLiteAffineQuantization;
}

// Connects the layer to the graph
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteOpaqueContext* tfLiteContext,
                     TfLiteOpaqueNode* tfLiteNode,
                     armnnOpaqueDelegate::DelegateData& data)
{
    // Get the array of input indices from TfLiteOpaqueNodeInputs; each entry is the index of the
    // tensor bound to the corresponding input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    // We can't validate the number of inputs against layer->GetNumInputSlots() as some operators differ.
    // An example is Mean, where the number of TFLite inputs is 2 but the number of Arm NN inputs is 1,
    // as the axis is stored within the descriptor.

    // Connect the input slots.
    // For each input slot, get the index of the opaque tensor that was allocated for it.
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[inputIndexArray[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[inputIndexArray[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Get the array of output indices from TfLiteOpaqueNodeOutputs; each entry is the index of the
    // tensor bound to the corresponding output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    // numOutputs was set by TfLiteOpaqueNodeOutputs above.
    if (numOutputs != static_cast<int>(layer->GetNumOutputSlots()))
    {
        ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of output slots does not match actual "
                            "number of output slots.";
        return kTfLiteError;
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

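// Appends an Arm NN activation layer after prevLayer when the TfLite node carries a fused
// activation, and re-routes the node's output slots through it. Returns kTfLiteOk immediately
// when no activation is required.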
TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
                             TfLiteOpaqueNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnOpaqueDelegate::DelegateData& data)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActReluN1To1:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
                                      tfLiteContext,
                                      IsActivationSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      activationOutputInfo,
                                      activationOutputInfo,
                                      activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->SetBackendId(setBackend);

    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Get the array of output indices from TfLiteOpaqueNodeOutputs; each entry is the index of the
    // tensor bound to the corresponding output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    TfLiteStatus outputStatus = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs);
    if (outputStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndexArray[outputIndex])]->Connect(activationLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

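// Inserts a Reshape layer after prevLayer and wires the node's output slots to it.
// Returns nullptr when no backend supports the reshape.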
armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          armnn::IConnectableLayer* prevLayer,
                                          armnn::TensorInfo reshapedOutputTensorInfo,
                                          armnn::TensorInfo outputTensorInfo,
                                          armnnOpaqueDelegate::DelegateData& data)
{
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
                                      tfLiteContext,
                                      IsReshapeSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      reshapedOutputTensorInfo,
                                      outputTensorInfo,
                                      desc);

    if (!isSupported)
    {
        return nullptr;
    }

    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->SetBackendId(setBackend);

    prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Gather the array of output indices and its length; replaces node->outputs->data[i]
    const int* outputIndices = nullptr;
    int numOutputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndices, &numOutputs);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather output information from node.");
    }

    if (static_cast<unsigned int>(numOutputs) != reshapeLayer->GetNumOutputSlots())
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (" +
                               std::to_string(numOutputs) +
                               " != " +
                               std::to_string(reshapeLayer->GetNumOutputSlots()) +
                               ") in node.");
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndices[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
    }
    return reshapeLayer;
}

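// Maps a TfLite tensor type to the corresponding Arm NN data type. kTfLiteInt8 maps to QAsymmS8
// when a single zero point is present and to QSymmS8 for per-channel (symmetric) quantization.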
armnn::DataType GetDataType(const TfLiteOpaqueTensor* tfLiteTensor)
{
    switch (TfLiteOpaqueTensorType(tfLiteTensor))
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                        reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);

                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unsupported data type: " +
                                   std::to_string(TfLiteOpaqueTensorType(tfLiteTensor)));
    }
}

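// Builds an armnn::TensorInfo (shape, data type, constness and quantization parameters) from a
// TfLite opaque tensor. Output tensors without a shape are marked as Dimensionality::NotSpecified.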
armnn::TensorInfo GetTensorInfoForTfLiteOpaqueTensor(const TfLiteOpaqueTensor* tfLiteTensor, bool isOutput = false)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;

    auto tensorDimensionSize = TfLiteOpaqueTensorNumDims(tfLiteTensor);
    if (tensorDimensionSize == 0)
    {
        // If the input tensor does not have a shape, assume it is a 1D tensor
        if (!isOutput)
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };

            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);

            if (IsConstantTensor(tfLiteTensor))
            {
                ret.SetConstant(true);
            }
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
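        // The fixed size of 5 matches the maximum number of tensor dimensions Arm NN supports
        // (armnn::MaxNumOfTensorDimensions).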
        bool dimensionsSpecificity[5] = { true, true, true, true, true };

        for (int32_t i = 0; i < tensorDimensionSize; ++i)
        {
            int32_t dim = TfLiteOpaqueTensorDim(tfLiteTensor, i);

            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }

        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
                                       tensorDims.data(),
                                       dimensionsSpecificity);

        ret = armnn::TensorInfo(tensorShape, type);
        if (IsConstantTensor(tfLiteTensor))
        {
            ret.SetConstant(true);
        }
    }

    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // Get per-channel quantization parameters
        const auto* affineQuantization =
                reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = TfLiteOpaqueTensorGetQuantizationParams(tfLiteTensor);
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

armnn::ConstTensor CreateConstTensor(const TfLiteOpaqueTensor* tfLiteTensor,
                                     const armnn::TensorInfo& tensorInfo)
{
    auto allocType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    if (allocType != kTfLiteMmapRo)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Not constant allocation type: " +
                               std::to_string(allocType));
    }

    return armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

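// Note: returns a heap-allocated ConstTensor; the caller takes ownership of the returned pointer.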
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteOpaqueContext* tfLiteContext,
                                                  TfLiteOpaqueNode* tfLiteNode,
                                                  int index)
{
    const TfLiteOpaqueTensor* tfLiteTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, index);
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteTensor);

    return new armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

bool IsOptionalOperandPresent(TfLiteOpaqueNode* tfLiteNode, const int operandIndex)
{
    // Get the array of input indices from TfLiteOpaqueNodeInputs; each entry is the index of the
    // tensor bound to the corresponding input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // The input is not present if the inputs array does not extend to operandIndex,
    // or if the entry at operandIndex is negative.
    return numInputs > operandIndex && inputIndexArray[operandIndex] >= 0;
}

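// Creates constant layers for any constant input tensors of the node and registers their output
// slots in delegateData so that Connect() can wire them to the consuming layer.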
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnOpaqueDelegate::DelegateData& delegateData,
                           TfLiteOpaqueContext* tfLiteContext,
                           TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices from TfLiteOpaqueNodeInputs; each entry is the index of the
    // tensor bound to the corresponding input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if (status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // Process input tensors.
    // If an input tensor is a constant tensor, create a constant layer and connect it to the network.
    for (int32_t inputIndex = 0; inputIndex < static_cast<int32_t>(layer->GetNumInputSlots()); ++inputIndex)
    {
        const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, inputIndex);

        if (IsConstantTensor(tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONSTANT",
                                              tfLiteContext,
                                              IsConstantSupported,
                                              delegateData.m_Backends,
                                              isSupported,
                                              setBackend,
                                              inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }

            auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[inputIndexArray[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

} // namespace anonymous