//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>
#include <DelegateUtils.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/c/c_api_opaque.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>

#include <fmt/format.h>

namespace
{
std::string GetName(armnn::ActivationFunction function, int nodeIndex)
{
    return fmt::format("{}:{}", GetActivationFunctionAsCString(function), nodeIndex);
}

std::string GetName(armnn::ArgMinMaxFunction function, int nodeIndex)
{
    return fmt::format("{}:{}", GetArgMinMaxFunctionAsCString(function), nodeIndex);
}

std::string GetName(armnn::BinaryOperation opType, int nodeIndex)
{
    return fmt::format("{}:{}", GetBinaryOperationAsCString(opType), nodeIndex);
}

std::string GetName(armnn::ComparisonOperation layerType, int nodeIndex)
{
    return fmt::format("{}:{}", GetComparisonOperationAsCString(layerType), nodeIndex);
}

std::string GetName(armnn::LogicalBinaryOperation operation, int nodeIndex)
{
    return fmt::format("{}:{}", GetLogicalBinaryOperationAsCString(operation), nodeIndex);
}

std::string GetName(armnn::UnaryOperation opType, int nodeIndex)
{
    return fmt::format("{}:{}", GetUnaryOperationAsCString(opType), nodeIndex);
}

std::string GetName(armnn::LayerType layerType, int nodeIndex, std::string subname = "")
{
    return fmt::format("{}{}:{}", GetLayerTypeAsCString(layerType), subname, nodeIndex);
}
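
// The GetName overloads above build a unique, human-readable layer name of the form
// "<operation>:<nodeIndex>". For example (illustrative): GetName(armnn::ActivationFunction::ReLu, 7)
// returns "ReLu:7".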

// Macro to call an Is<layer_name>Supported function and log the caller name together with the reason
// for lack of support, if any.
#define FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", \
                                      opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_OPAQUE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

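// Typical usage, as in FusedActivation() below: probe each configured backend in turn and record the
// first one that accepts the layer. The tensor infos and descriptor here are placeholders for the
// caller's own values:
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
//                                       tfLiteContext,
//                                       IsActivationSupported,
//                                       data.m_Backends,
//                                       isSupported,
//                                       setBackend,
//                                       inputInfo,
//                                       outputInfo,
//                                       activationDesc);
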
TfLiteStatus ValidateNumInputs(TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    int numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of inputs (%d != %d) in node #%d",
                numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = TfLiteOpaqueNodeNumberOfOutputs(tfLiteNode);
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (%d != %d) in node #%d",
                numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

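// A tensor counts as constant when its data lives in read-only, memory-mapped storage (kTfLiteMmapRo).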
bool IsConstantTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return TfLiteOpaqueTensorGetAllocationType(tfLiteTensor) == kTfLiteMmapRo;
}

bool IsDynamicTensor(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return TfLiteOpaqueTensorGetAllocationType(tfLiteTensor) == kTfLiteDynamic;
}

bool IsValid(const TfLiteOpaqueTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteOpaqueContext* tfLiteContext,
             const TfLiteOpaqueTensor* tfLiteTensor,
             int32_t operatorCode,
             int32_t nodeIndex)
{
    if(!IsValid(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Invalid TfLite tensor in operator #%d node #%d",
                operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic tensors are not supported in operator #%d node #%d",
                operatorCode, nodeIndex);
        return false;
    }
    return true;
}

bool IsAffineQuantization(const TfLiteOpaqueTensor& tfLiteTensor)
{
    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(&tfLiteTensor);
    return quantizationInfo.type == kTfLiteAffineQuantization;
}

// Connects the layer to the graph.
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteOpaqueContext* tfLiteContext,
                     TfLiteOpaqueNode* tfLiteNode,
                     armnnOpaqueDelegate::DelegateData& data)
{
    // Get the array of input indices. TfLiteOpaqueNodeInputs sets inputIndexArray to point at an
    // int array holding, for each input slot of the node, the index of the backing tensor.
    const int* inputIndexArray;
    int numInputs;
    if(TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    // We can't validate the number of inputs against layer->GetNumInputSlots() as some operators differ.
    // An example is Mean where the number of TFLite inputs is 2, but the number of Arm NN inputs is 1,
    // as we store the axis within the descriptor.

    // Connect the input slots.
    // For each input slot, get the index of the opaque tensor that was allocated for it.
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[inputIndexArray[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[inputIndexArray[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Get the array of output indices. TfLiteOpaqueNodeOutputs sets outputIndexArray to point at an
    // int array holding the tensor index for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    if(TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    // Unlike the inputs, the number of outputs must match the layer's output slot count exactly.
    if(numOutputs != static_cast<int>(layer->GetNumOutputSlots()))
    {
        ARMNN_LOG(error) << "Layer: " << layer->GetName() << ": Expected number of output slots does not match "
                            "actual number of output slots.";
        return kTfLiteError;
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

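// If the TfLite node specifies a fused activation (e.g. kTfLiteActRelu), appends the matching Arm NN
// Activation layer after prevLayer and redirects the node's output slots through it.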
TfLiteStatus FusedActivation(TfLiteOpaqueContext* tfLiteContext,
                             TfLiteOpaqueNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnOpaqueDelegate::DelegateData& data,
                             int nodeIndex)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActReluN1To1:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("ACTIVATION",
                                      tfLiteContext,
                                      IsActivationSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      activationOutputInfo,
                                      activationOutputInfo,
                                      activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    auto layerName = GetName(activationDesc.m_Function, nodeIndex);
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc, layerName.c_str());
    ARMNN_ASSERT(activationLayer != nullptr);

    activationLayer->SetBackendId(setBackend);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Get the array of output indices. TfLiteOpaqueNodeOutputs sets outputIndexArray to point at an
    // int array holding the tensor index for each output slot of the node.
    const int* outputIndexArray;
    int numOutputs;
    TfLiteStatus outputStatus = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndexArray, &numOutputs);
    if(outputStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndexArray[outputIndex])]->Connect(activationLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndexArray[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

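// Appends a Reshape layer after prevLayer, reshaping from reshapedOutputTensorInfo to outputTensorInfo,
// and rewires the node's output slots through it. Returns nullptr if no backend supports the reshape.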
armnn::IConnectableLayer* AddReshapeLayer(TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          armnn::IConnectableLayer* prevLayer,
                                          armnn::TensorInfo reshapedOutputTensorInfo,
                                          armnn::TensorInfo outputTensorInfo,
                                          armnnOpaqueDelegate::DelegateData& data,
                                          int nodeIndex)
{
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
                                      tfLiteContext,
                                      IsReshapeSupported,
                                      data.m_Backends,
                                      isSupported,
                                      setBackend,
                                      reshapedOutputTensorInfo,
                                      outputTensorInfo,
                                      desc);

    if (!isSupported)
    {
        return nullptr;
    }

    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex);
    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc, layerName.c_str());
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->SetBackendId(setBackend);

    prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Gather the array of output indices and its length; replaces node->outputs->data[i]
    const int* outputIndices = nullptr;
    int numOutputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeOutputs(tfLiteNode, &outputIndices, &numOutputs);
    if(status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather output information from node.");
    }

    if (static_cast<unsigned int>(numOutputs) != reshapeLayer->GetNumOutputSlots())
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unexpected number of outputs (" +
                               std::to_string(numOutputs) +
                               " != " +
                               std::to_string(reshapeLayer->GetNumOutputSlots()) +
                               ") in node.");
    }

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
                outputIndices[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));

        armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(outputIndices[outputIndex])] = &outputSlot;
    }
    return reshapeLayer;
}

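// Maps the TfLite tensor type to the corresponding Arm NN DataType. Note that kTfLiteInt8 resolves to
// QAsymmS8 for per-tensor quantization (a single zero point) and to QSymmS8 for per-channel
// quantization, defaulting to QAsymmS8 when no affine quantization parameters are present.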
armnn::DataType GetDataType(const TfLiteOpaqueTensor* tfLiteTensor)
{
    switch (TfLiteOpaqueTensorType(tfLiteTensor))
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                        reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);

                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unsupported data type: " +
                                   std::to_string(TfLiteOpaqueTensorType(tfLiteTensor)));
    }
}

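// Builds an armnn::TensorInfo from a TfLite opaque tensor: data type, shape (flagging dimensions of
// size <= 0 as unspecified), constness, and per-tensor or per-channel quantization parameters.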
armnn::TensorInfo GetTensorInfoForTfLiteOpaqueTensor(const TfLiteOpaqueTensor* tfLiteTensor, bool isOutput = false)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;

    auto tensorDimensionSize = TfLiteOpaqueTensorNumDims(tfLiteTensor);
    if (tensorDimensionSize == 0)
    {
        // If the input tensor does not have a shape, assume it is a 1D tensor.
        if (!isOutput)
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };

            armnn::TensorShape tensorShape(safeShape.size(),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);

            if(IsConstantTensor(tfLiteTensor))
            {
                ret.SetConstant(true);
            }
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        // unsigned char is used here because std::vector<bool> is bit-packed and cannot expose a bool*.
        std::vector<unsigned int> tensorDims(tensorDimensionSize);
        std::vector<unsigned char> dimensionsSpecificity(tensorDimensionSize, true);

        for (int32_t i = 0; i < tensorDimensionSize; ++i)
        {
            int32_t dim = TfLiteOpaqueTensorDim(tfLiteTensor, i);

            if (dim <= 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }

        armnn::TensorShape tensorShape(tensorDimensionSize,
                                       tensorDims.data(),
                                       reinterpret_cast<const bool*>(dimensionsSpecificity.data()));

        ret = armnn::TensorInfo(tensorShape, type);
        if (IsConstantTensor(tfLiteTensor))
        {
            ret.SetConstant(true);
        }
    }

    auto quantizationInfo = TfLiteOpaqueTensorGetQuantization(tfLiteTensor);
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // Get the per-channel quantization parameters
        const auto* affineQuantization =
                reinterpret_cast<TfLiteAffineQuantization*>(quantizationInfo.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    return ret;
}

armnn::ConstTensor CreateConstTensor(const TfLiteOpaqueTensor* tfLiteTensor,
                                     const armnn::TensorInfo& tensorInfo)
{
    auto allocType = TfLiteOpaqueTensorGetAllocationType(tfLiteTensor);
    if (allocType != kTfLiteMmapRo)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Not a constant allocation type: " +
                               std::to_string(allocType));
    }

    return armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

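// Creates a ConstTensor for the node input at the given index. Note that the tensor is heap-allocated
// and ownership passes to the caller.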
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteOpaqueContext* tfLiteContext,
                                                  TfLiteOpaqueNode* tfLiteNode,
                                                  int index)
{
    const TfLiteOpaqueTensor* tfLiteTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, index);
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteTensor);

    return new armnn::ConstTensor(tensorInfo, TfLiteOpaqueTensorData(tfLiteTensor));
}

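// Returns true if the optional input at operandIndex exists and is bound to a valid tensor index.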
bool IsOptionalOperandPresent(TfLiteOpaqueNode* tfLiteNode, const int operandIndex)
{
    // Get the array of input indices. TfLiteOpaqueNodeInputs sets inputIndexArray to point at an
    // int array holding the tensor index for each input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if(status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // The input is absent if the inputs array has no entry at operandIndex, or if that entry is
    // negative (-1 marks an omitted optional operand).
    return numInputs > operandIndex && inputIndexArray[operandIndex] >= 0;
}

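// Creates a Constant layer for every constant input tensor of the node and registers its output slot,
// so that a later Connect() call can wire it to the consuming layer.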
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnOpaqueDelegate::DelegateData& delegateData,
                           TfLiteOpaqueContext* tfLiteContext,
                           TfLiteOpaqueNode* tfLiteNode,
                           int nodeIndex)
{
    // Get the array of input indices. TfLiteOpaqueNodeInputs sets inputIndexArray to point at an
    // int array holding the tensor index for each input slot of the node.
    const int* inputIndexArray;
    int numInputs = 0;

    TfLiteStatus status = TfLiteOpaqueNodeInputs(tfLiteNode, &inputIndexArray, &numInputs);
    if(status != kTfLiteOk)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Unable to gather input information from node.");
    }

    // Process input tensors:
    // if an input tensor is a constant, create a Constant layer and connect it to the network.
    for (int32_t inputIndex = 0; inputIndex < static_cast<int32_t>(layer->GetNumInputSlots()); ++inputIndex)
    {
        const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueNodeGetInput(tfLiteContext, tfLiteNode, inputIndex);

        if (IsConstantTensor(tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONSTANT",
                                              tfLiteContext,
                                              IsConstantSupported,
                                              delegateData.m_Backends,
                                              isSupported,
                                              setBackend,
                                              inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }

            auto constantInput = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

            auto layerName = GetName(armnn::LayerType::Constant, nodeIndex);
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput,
                                                                                               layerName.c_str());
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[inputIndexArray[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

} // anonymous namespace