//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>
#include <DelegateUtils.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/TypesUtils.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/kernel_util.h>

#include <fmt/format.h>

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

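// Overloads that build a unique ArmNN layer name of the form "<operation>:<nodeIndex>" from the
// relevant ArmNN enum and the TfLite node index, so layers can be traced back to the TfLite node.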
std::string GetLayerName(armnn::ActivationFunction function, int nodeIndex)
{
    return fmt::format("{}:{}", GetActivationFunctionAsCString(function), nodeIndex);
}

std::string GetLayerName(armnn::ArgMinMaxFunction function, int nodeIndex)
{
    return fmt::format("{}:{}", GetArgMinMaxFunctionAsCString(function), nodeIndex);
}

std::string GetLayerName(armnn::BinaryOperation opType, int nodeIndex)
{
    return fmt::format("{}:{}", GetBinaryOperationAsCString(opType), nodeIndex);
}

std::string GetLayerName(armnn::ComparisonOperation layerType, int nodeIndex)
{
    return fmt::format("{}:{}", GetComparisonOperationAsCString(layerType), nodeIndex);
}

std::string GetLayerName(armnn::LogicalBinaryOperation operation, int nodeIndex)
{
    return fmt::format("{}:{}", GetLogicalBinaryOperationAsCString(operation), nodeIndex);
}

std::string GetLayerName(armnn::UnaryOperation opType, int nodeIndex)
{
    return fmt::format("{}:{}", GetUnaryOperationAsCString(opType), nodeIndex);
}

std::string GetLayerName(armnn::LayerType layerType, int nodeIndex, std::string name = "")
{
    return fmt::format("{}{}:{}", GetLayerTypeAsCString(layerType), name, nodeIndex);
}

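// Checks that the TfLite node has exactly the expected number of input tensors and reports a
// kernel error through the TfLite context if it does not.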
TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

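// Counterpart to ValidateNumInputs: checks the node's output tensor count against the expected size.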
TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

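// Returns true if the TfLite tensor is dynamically allocated (kTfLiteDynamic); dynamic tensors
// are not supported by the delegate.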
bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.allocation_type == kTfLiteDynamic;
}

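// Basic validity checks for TfLite tensors. The pointer overload rejects null tensors; the overload
// below additionally rejects dynamic tensors and reports failures through the TfLite context.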
bool IsValid(const TfLiteTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
{
    if (!IsValid(&tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    return true;
}

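// Returns true if the TfLite tensor carries affine (scale/zero-point) quantization parameters.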
bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.quantization.type == kTfLiteAffineQuantization;
}

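// Wires an ArmNN layer into the network being built: connects each registered output slot feeding
// the node to the layer's input slots, and records the layer's output slots so later nodes can
// connect to them. Fails if the node's output count does not match the layer's output slot count.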
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    if (static_cast<unsigned int>(tfLiteNode->outputs->size) != layer->GetNumOutputSlots())
    {
        return kTfLiteError;
    }

    // Connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

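// Appends an ArmNN Activation layer implementing the node's TfLite fused activation (RELU,
// RELU1/RELU_N1_TO_1, RELU6, SIGMOID or TANH). Returns kTfLiteOk immediately for kTfLiteActNone,
// and kTfLiteError if the activation type is unknown or no backend supports the activation.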
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data,
                             int nodeIndex)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               activationOutputInfo,
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    auto layerName = GetLayerName(activationDesc.m_Function, nodeIndex);
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc, layerName.c_str());
    activationLayer->SetBackendId(setBackend);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

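// Appends a Reshape layer after prevLayer, reshaping reshapedOutputTensorInfo into outputTensorInfo.
// Returns the new layer, or nullptr if no backend supports the reshape.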
armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnn::IConnectableLayer* prevLayer,
                                          armnn::TensorInfo reshapedOutputTensorInfo,
                                          armnn::TensorInfo outputTensorInfo,
                                          armnnDelegate::DelegateData& data,
                                          int nodeIndex)
{
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               reshapedOutputTensorInfo,
                               outputTensorInfo,
                               desc);

    if (!isSupported)
    {
        return nullptr;
    }

    auto layerName = GetLayerName(armnn::LayerType::Reshape, nodeIndex);
    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc, layerName.c_str());
    reshapeLayer->SetBackendId(setBackend);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return reshapeLayer;
}

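// Maps a TfLite tensor type to the corresponding armnn::DataType. For kTfLiteInt8 the quantization
// parameters are inspected to distinguish per-tensor asymmetric (QAsymmS8) from per-channel
// symmetric (QSymmS8) quantization. Throws armnn::Exception for unsupported types.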
armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
{
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = tfLiteTensor.quantization;
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(static_cast<int>(tfLiteTensor.type)));
    }
}

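// Builds an armnn::TensorInfo from a TfLite tensor: shape (with unspecified dimensions for dynamic
// or rank-0 tensors), data type, constness, and per-tensor or per-channel quantization parameters.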
armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        // If the input tensor does not have a shape, assume it is a 1D tensor
        if (!isOutput)
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };
            armnn::TensorShape tensorShape(safeShape.size(),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);
            if (tflite::IsConstantTensor(&tfLiteTensor))
            {
                ret.SetConstant(true);
            }
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(tensorDimensionSize);
        std::vector<unsigned char> dimensionsSpecificity(tensorDimensionSize, true);
        for (int i = 0; i < tensorDimensionSize; ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            if (dim < 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }
        armnn::TensorShape tensorShape(tensorDimensionSize,
                                       tensorDims.data(),
                                       reinterpret_cast<const bool*>(dimensionsSpecificity.data()));

        if (tflite::IsConstantTensor(&tfLiteTensor))
        {
            ret = armnn::TensorInfo(tensorShape, type);
            ret.SetConstant(true);
        }
        else
        {
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // Get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    return ret;
}

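// Wraps a constant (kTfLiteMmapRo) TfLite tensor's data in an armnn::ConstTensor without copying.
// Throws if the tensor does not have a constant allocation type.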
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     const armnn::TensorInfo& tensorInfo)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
    }

    return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
}

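// Creates a heap-allocated ConstTensor for the node input at the given index; the caller takes
// ownership of the returned pointer.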
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor& tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}

bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
{
    // The input is not present if the inputs array has no entry at operandIndex, or if that entry
    // has a value of -1 or less.
    if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
    {
        return true;
    }
    return false;
}

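// For each constant input of the node, checks backend support, creates an ArmNN Constant layer,
// sets its output tensor info and records its output slot so the main layer can be connected to it.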
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode,
                           int nodeIndex)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If an input tensor is a constant tensor, create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor, inputTensorInfo);

            auto layerName = GetLayerName(armnn::LayerType::Constant, nodeIndex);
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput,
                                                                                               layerName.c_str());
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

} // namespace anonymous