//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>
#include <DelegateUtils.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/c/c_api_opaque.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", \
                                      opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

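// Checks that the node provides the expected number of inputs; logs a kernel error and returns kTfLiteError otherwise.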
TfLiteStatus ValidateNumInputs(TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    int numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of inputs (%d != %d) in node #%d",
                numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

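// Checks that the node provides the expected number of outputs; logs a kernel error and returns kTfLiteError otherwise.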
TfLiteStatus ValidateNumOutputs(TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = TfLiteOpaqueNodeNumberOfOutputs(tfLiteNode);
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (%d != %d) in node #%d",
                numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

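// A tensor is treated as constant if its data is allocated as memory-mapped read-only (kTfLiteMmapRo).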
bool IsConstantTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    auto tensorAllocationType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    if (tensorAllocationType == kTfLiteMmapRo)
    {
        return true;
    }
    return false;
}

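// A tensor is dynamic if its allocation type is kTfLiteDynamic; dynamic tensors are rejected by the delegate.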
bool IsDynamicTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    auto tensorAllocationType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    if (tensorAllocationType == kTfLiteDynamic)
    {
        return true;
    }
    return false;
}

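// IsValid overloads: check that a tensor pointer is non-null and, for the context-aware overload, that the tensor
// is not dynamic; failures are logged against the operator and node.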
bool IsValid(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteOpaqueContext* tfLiteContext,
             const TfLiteOpaqueTensor* tfLiteTensor,
             int32_t operatorCode,
             int32_t nodeIndex)
{
    if(!IsValid(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return false;
    }
    return true;
}

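// Returns true if the tensor carries TfLiteAffineQuantization parameters (per-tensor or per-channel).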
bool IsAffineQuantization(const TfLiteOpaqueTensor& tfLiteTensor)
{
    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(&tfLiteTensor);
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        return true;
    }
    return false;
}

// Connects the layer's input and output slots to the graph held in DelegateData.
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteOpaqueContext* tfLiteContext,
                     TfLiteOpaqueNode* tfLiteNode,
                     armnnOpaqueDelegate::DelegateData& data)
{
    // Get the array of input indices; TfLiteOpaqueNodeInputs sets inputIndexArray to the indices of the tensors
    // bound to each input slot of the node.
    const int* inputIndexArray;
    int numInputs;
    if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    // numInputs is set by TfLiteOpaqueNodeInputs.
    if(numInputs != static_cast<int>(layer->GetNumInputSlots()))
    {
        ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of input slots does not match actual "
                            "number of input slots.";
        return kTfLiteError;
    }
    // Connect the input slots.
    // For each input slot, get the index of the opaque tensor that was allocated for it.
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[inputIndexArray[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[inputIndexArray[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Get the array of output indices; TfLiteOpaqueNodeOutputs sets outputIndexArray to the indices of the tensors
    // bound to each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    // numOutputs is set by TfLiteOpaqueNodeOutputs.
    if(numOutputs != static_cast<int>(layer->GetNumOutputSlots()))
    {
        ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of output slots does not match actual "
                            "number of output slots.";
        return kTfLiteError;
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

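// Translates a TfLite fused activation into an ArmNN Activation layer appended to prevLayer, checking backend
// support first, and rebinds the node's output slots to the new layer. Returns kTfLiteOk for kTfLiteActNone.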
TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
                             TfLiteOpaqueNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnOpaqueDelegate::DelegateData& data)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActReluN1To1:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
                                      tfLiteContext,
                                      IsActivationSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      activationOutputInfo,
                                      activationOutputInfo,
                                      activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
    activationLayer->SetBackendId(setBackend);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Get the array of output indices; TfLiteOpaqueNodeOutputs sets outputIndexArray to the indices of the tensors
    // bound to each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    TfLiteStatus outputStatus = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs);
    if(outputStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndexArray[outputIndex])]->Connect(activationLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

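// Appends a Reshape layer after prevLayer, reshaping from reshapedOutputTensorInfo to outputTensorInfo, and rebinds
// the node's output slots to the new layer. Returns nullptr if no backend supports the reshape.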
armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          armnn::IConnectableLayer* prevLayer,
                                          armnn::TensorInfo reshapedOutputTensorInfo,
                                          armnn::TensorInfo outputTensorInfo,
                                          armnnOpaqueDelegate::DelegateData& data)
{
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
                                      tfLiteContext,
                                      IsReshapeSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      reshapedOutputTensorInfo,
                                      outputTensorInfo,
                                      desc);

    if (!isSupported)
    {
        return nullptr;
    }

    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
    reshapeLayer->SetBackendId(setBackend);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Gather the array of output indices and its length, replaces node->outputs->data[i]
    const int* outputIndices = nullptr;
    int numOutputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndices, &numOutputs);
    if(status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather output information from node.");
    }

    if (static_cast<unsigned int>(numOutputs) != reshapeLayer->GetNumOutputSlots())
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (" +
                               std::to_string(numOutputs) +
                               " != " +
                               std::to_string(reshapeLayer->GetNumOutputSlots()) +
                               ") in node.");
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndices[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
    }
    return reshapeLayer;
}

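// Maps a TfLite tensor type to the corresponding ArmNN DataType. Int8 resolves to QAsymmS8 or QSymmS8 depending on
// whether a single (asymmetric) zero point is present in the affine quantization parameters.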
armnn::DataType GetDataType(const TfLiteOpaqueTensor* tfLiteTensor)
{
    switch (TfLiteOpaqueTensorType(tfLiteTensor))
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                        reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);

                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(TfLiteOpaqueTensorType(tfLiteTensor)));
    }
}

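// Builds an armnn::TensorInfo (shape, data type, constness and quantization parameters) from a TfLite opaque tensor.
// Shapeless non-output tensors are treated as 1D; per-channel quantization scales are carried over when present.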
armnn::TensorInfo GetTensorInfoForTfLiteOpaqueTensor(const TfLiteOpaqueTensor* tfLiteTensor, bool isOutput = false)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;

    auto tensorDimensionSize = TfLiteOpaqueTensorNumDims(tfLiteTensor);
    if (tensorDimensionSize == 0)
    {
        // If the input tensor does not have a shape, assume that it is a 1D tensor.
        if (!isOutput)
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };

            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);

            if(IsConstantTensor(tfLiteTensor))
            {
                ret.SetConstant(true);
            }
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
        // Assumes at most 5 dimensions; a dimension of size 0 is marked as unspecified.
        bool dimensionsSpecificity[5] = { true, true, true, true, true };

        for (int32_t i = 0; i < tensorDimensionSize; ++i)
        {
            int32_t dim = TfLiteOpaqueTensorDim(tfLiteTensor, i);

            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }

        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
                                       tensorDims.data(),
                                       dimensionsSpecificity);

        ret = armnn::TensorInfo(tensorShape, type);
        if(IsConstantTensor(tfLiteTensor))
        {
            ret.SetConstant(true);
        }
    }

    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // Get per-channel quantization parameters
        const auto* affineQuantization =
                reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = TfLiteOpaqueTensorGetQuantizationParams(tfLiteTensor);
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

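// Wraps the tensor's (memory-mapped, read-only) data in an armnn::ConstTensor; throws if the tensor is not constant.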
armnn::ConstTensor CreateConstTensor(const TfLiteOpaqueTensor* tfLiteTensor,
                                     const armnn::TensorInfo& tensorInfo)
{
    auto allocType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    if (allocType != kTfLiteMmapRo)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(allocType));
    }

    return armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

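// Creates a heap-allocated ConstTensor from the node's input tensor at the given index; the caller takes ownership
// of the returned pointer.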
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteOpaqueContext* tfLiteContext,
                                                  TfLiteOpaqueNode* tfLiteNode,
                                                  int index)
{
    const TfLiteOpaqueTensor* tfLiteTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, index);
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteTensor);

    return new armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

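// Returns true if the node supplies an operand at operandIndex and its tensor index is valid (>= 0), i.e. the
// optional input is present.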
bool IsOptionalOperandPresent(TfLiteOpaqueNode* tfLiteNode, const int operandIndex)
{
    // Get the array of input indices; TfLiteOpaqueNodeInputs sets inputIndexArray to the indices of the tensors
    // bound to each input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if(status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // If the inputs array has fewer than operandIndex entries, or if the entry at operandIndex is -1 or less,
    // then the input is not present.
    if (numInputs > operandIndex && inputIndexArray[operandIndex] >= 0)
    {
        return true;
    }
    return false;
}

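// For every constant input of the node, checks backend support, adds an ArmNN Constant layer and registers its
// output slot so that subsequent layers can connect to it.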
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnOpaqueDelegate::DelegateData& delegateData,
                           TfLiteOpaqueContext* tfLiteContext,
                           TfLiteOpaqueNode* tfLiteNode)
{
    // Get the array of input indices; TfLiteOpaqueNodeInputs sets inputIndexArray to the indices of the tensors
    // bound to each input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if(status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // Process input tensors.
    // If an input tensor is a constant tensor, create a constant layer and connect it to the network.
    for (int32_t inputIndex = 0; inputIndex < static_cast<int32_t>(layer->GetNumInputSlots()); ++inputIndex)
    {
        const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, inputIndex);

        if (IsConstantTensor(tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONSTANT",
                                              tfLiteContext,
                                              IsConstantSupported,
                                              delegateData.m_Backends,
                                              isSupported,
                                              setBackend,
                                              inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }

            auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[inputIndexArray[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

} // namespace anonymous