blob: 1aa902927169373a774476b6e4cc8291532d41f1 [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
#pragma once

#include <armnn_delegate.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
24
Sadik Armagan62483be2020-10-23 17:14:43 +010025namespace
26{
27
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support.
//
// Iterates over 'backends' (in preference order) and queries each registered backend's
// ILayerSupport::'func'(__VA_ARGS__, reason). On the first backend that reports support,
// sets 'supported' = true and records the chosen backend id in 'setBackend', then stops.
// Unsupported backends log a TFLITE warning with the backend's reason string (if any);
// unregistered backends log a kernel error. If no backend supports the layer,
// 'supported' is left false and a final kernel error is logged.
// armnn::InvalidArgumentException from the support query is re-thrown with call-site location.
// NOTE: 'supported' must be initialised to false by the caller before expansion.
#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
73
74TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
75 TfLiteNode* tfLiteNode,
76 const unsigned int expectedSize,
77 int nodeIndex)
78{
79 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000080 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010081 {
82 TF_LITE_MAYBE_KERNEL_LOG(
83 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
84 numInputs, expectedSize, nodeIndex);
85 return kTfLiteError;
86 }
87 return kTfLiteOk;
88}
89
90TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
91 TfLiteNode* tfLiteNode,
92 const unsigned int expectedSize,
93 int nodeIndex)
94{
95 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000096 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010097 {
98 TF_LITE_MAYBE_KERNEL_LOG(
99 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
100 numOutputs, expectedSize, nodeIndex);
101 return kTfLiteError;
102 }
103 return kTfLiteOk;
104}
105
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000106bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
107{
108 auto tensorAllocationType = tfLiteTensor.allocation_type;
109 if (tensorAllocationType == kTfLiteDynamic)
110 {
111 return true;
112 }
113 return false;
114}
115
Sadik Armagan6e36a642020-11-10 21:18:41 +0000116bool IsValid(const TfLiteTensor* tfLiteTensor)
117{
118 return tfLiteTensor == nullptr ? false : true;
119}
120
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000121bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
122{
123 if(!IsValid(&tfLiteTensor))
124 {
125 std::cout << "..Is Not Valid" << std::endl;
126 TF_LITE_MAYBE_KERNEL_LOG(
127 tfLiteContext,
128 "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
129 operatorCode, nodeIndex);
130 return false;
131 }
132 if (IsDynamicTensor(tfLiteTensor))
133 {
134 std::cout << "..IsDynamicTensor" << std::endl;
135 TF_LITE_MAYBE_KERNEL_LOG(
136 tfLiteContext,
137 "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
138 operatorCode, nodeIndex);
139 return false;
140 }
141 return true;
142}
143
Sadik Armagan32ca1442020-11-13 17:51:56 +0000144uint32_t NonNegative(int32_t value, int nodeIndex)
145{
146 if (value < 0)
147 {
Keith Davis892fafe2020-11-26 17:40:35 +0000148 throw armnn::Exception(
149 "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000150 }
151 else
152 {
153 return static_cast<uint32_t>(value);
154 }
155}
156
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000157bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
158{
159 auto quantizationInfo = tfLiteTensor.quantization;
160 if (quantizationInfo.type == kTfLiteAffineQuantization)
161 {
162 return true;
163 }
164 return false;
165}
166
Sadik Armagan67e95f22020-10-29 16:14:54 +0000167TfLiteStatus Connect(armnn::IConnectableLayer* layer,
168 TfLiteNode* tfLiteNode,
169 armnnDelegate::DelegateData& data)
170{
Keith Davis892fafe2020-11-26 17:40:35 +0000171 ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000172
173 // Connect the input slots
174 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
175 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000176 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
177 {
178 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
179 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000180 }
181
182 // Prepare output slots
183 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
184 {
185 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000186 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000187 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000188
Sadik Armagan67e95f22020-10-29 16:14:54 +0000189 return kTfLiteOk;
190}
191
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000192void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
193 armnn::TensorInfo& inputInfo1)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000194{
195 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
196 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
197
198 if (inputDimensions0 == inputDimensions1)
199 {
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000200 return;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000201 }
202
203 unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000204
205 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000206 armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
207 const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000208
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000209 smallInfo.SetShape(newShape);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000210
Sadik Armagan67e95f22020-10-29 16:14:54 +0000211}
212
// Appends the TfLite fused activation ('activationType') of a node to the graph,
// inserted after 'prevLayer's output slot 'outputSlotIndex'.
// Returns kTfLiteOk immediately for kTfLiteActNone, kTfLiteError for activation
// kinds with no ArmNN mapping or when no backend supports the activation.
// On success the activation layer replaces prevLayer as the registered producer
// of the node's output tensors in data.m_OutputSlotForNode.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    // The activation runs on (and produces) the same tensor info as prevLayer's output.
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            // Relu1 clamps to [-1, 1]: BoundedReLu with upper bound A and lower bound B.
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            // Relu6 clamps to [0, 6].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            // kTfLiteActSignBit (and any future kinds) have no ArmNN equivalent.
            return kTfLiteError;
    }

    // Ask the configured backends whether they support this activation.
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               activationOutputInfo,
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
    activationLayer->SetBackendId(setBackend);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        // The slot currently registered for this node output is prevLayer's output;
        // connect it into the activation, then re-register the activation's output
        // slot as the producer for downstream consumers.
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
304
// Appends a Reshape layer after 'prevLayer', reshaping from
// 'reshapedOutputTensorInfo' to the shape of 'outputTensorInfo'.
// Returns the new layer, or nullptr when no backend supports the reshape.
// On success the reshape layer replaces prevLayer as the registered producer
// of the node's output tensors in data.m_OutputSlotForNode.
armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnn::IConnectableLayer* prevLayer,
                                          armnn::TensorInfo reshapedOutputTensorInfo,
                                          armnn::TensorInfo outputTensorInfo,
                                          armnnDelegate::DelegateData& data)
{
    armnn::ReshapeDescriptor desc;
    desc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               reshapedOutputTensorInfo,
                               outputTensorInfo,
                               desc);

    if (!isSupported)
    {
        return nullptr;
    }

    armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
    reshapeLayer->SetBackendId(setBackend);
    ARMNN_ASSERT(reshapeLayer != nullptr);

    // prevLayer now produces the intermediate (pre-reshape) tensor.
    prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
    {
        // The slot registered for this node output is prevLayer's output; connect it
        // into the reshape, then re-register the reshape's output slot as producer.
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return reshapeLayer;
}
350
Sadik Armagan6e36a642020-11-10 21:18:41 +0000351armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100352{
Sadik Armagan62483be2020-10-23 17:14:43 +0100353 switch (tfLiteTensor.type)
354 {
355 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000356 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100357 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000358 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100359 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000360 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100361 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000362 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100363 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000364 {
365 auto quantizationInfo = tfLiteTensor.quantization;
366 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000367 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000368 auto* quantization =
369 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
370 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
371 {
372 return armnn::DataType::QAsymmS8;
373 }
374 else
375 {
376 return armnn::DataType::QSymmS8;
377 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000378 }
379 else
380 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000381 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000382 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000383 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100384 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000385 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100386 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000387 return armnn::DataType::Signed32;
Mike Kelly1f140f72021-04-06 12:25:55 +0100388 case kTfLiteInt64:
389 return armnn::DataType::Signed64;
Sadik Armagan62483be2020-10-23 17:14:43 +0100390 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000391 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100392 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000393}
Sadik Armagan62483be2020-10-23 17:14:43 +0100394
Sadik Armagan90a119b2022-08-05 16:12:49 +0100395armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000396{
397 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100398 armnn::TensorInfo ret;
399 auto tensorDimensionSize = tfLiteTensor.dims->size;
400 if (tensorDimensionSize == 0)
401 {
Sadik Armagan90a119b2022-08-05 16:12:49 +0100402 // If input tensor does not have a shape
403 // assuming that it has 1D tensor
404 if (!isOutput)
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000405 {
406 std::vector<unsigned int> safeShape = { 1 };
407 bool dimensionsSpecificity[1] = { true };
408 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
409 safeShape.data(),
410 dimensionsSpecificity);
411 ret = armnn::TensorInfo(tensorShape, type);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100412 if(tflite::IsConstantTensor(&tfLiteTensor))
413 {
414 ret.SetConstant(true);
415 }
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000416 }
417 else
418 {
419 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
420 ret = armnn::TensorInfo(tensorShape, type);
421 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100422 }
423 else
424 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000425 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100426 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000427 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100428 auto dim = tfLiteTensor.dims->data[i];
429 if (dim == 0)
430 {
431 dimensionsSpecificity[i] = false;
432 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000433 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100434 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000435 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
436 tensorDims.data(),
437 dimensionsSpecificity);
Cathal Corbett5b8093c2021-10-22 11:12:07 +0100438
439 if(tflite::IsConstantTensor(&tfLiteTensor))
440 {
441 ret = armnn::TensorInfo(tensorShape, type);
442 ret.SetConstant(true);
443 }
444 else
445 {
446 ret = armnn::TensorInfo(tensorShape, type);
447 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100448 }
449
450 auto quantizationInfo = tfLiteTensor.quantization;
451 if (quantizationInfo.type == kTfLiteAffineQuantization)
452 {
453 // get per-channel quantization parameters
454 const auto* affineQuantization =
455 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000456 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100457 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000458 std::vector<float> quantizationScales;
Finn Williamsf806c4d2021-02-22 15:13:12 +0000459 for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000460 {
461 quantizationScales.push_back(affineQuantization->scale->data[i]);
462 }
463 ret.SetQuantizationScales(quantizationScales);
Jan Eilers7612bd62021-04-06 17:29:03 +0100464 ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
Sadik Armagan62483be2020-10-23 17:14:43 +0100465 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000466 else
467 {
468 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
469 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
470 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100471 }
472 else
473 {
474 auto quantizationParameters = tfLiteTensor.params;
475 ret.SetQuantizationScale(quantizationParameters.scale);
476 ret.SetQuantizationOffset(quantizationParameters.zero_point);
477 }
478
479 return ret;
480}
481
// Wraps a TfLite constant tensor's data as an armnn::ConstTensor (no copy; the
// returned tensor aliases the TfLite buffer, which outlives graph construction
// because it is memory-mapped read-only).
// Throws armnn::Exception when the tensor's allocation type is not kTfLiteMmapRo,
// i.e. when it does not actually hold persistent constant data.
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     const armnn::TensorInfo& tensorInfo)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type))
;
    }

    return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
}
493
// Builds an armnn::ConstTensor describing the node input at 'index', aliasing the
// TfLite tensor's data buffer.
// NOTE(review): returns a heap-allocated object via raw `new`; the caller takes
// ownership and must delete it — confirm all callers do, otherwise this leaks.
// Also unlike CreateConstTensor above, this does not verify the allocation type.
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}
500
Sadik Armagan32ca1442020-11-13 17:51:56 +0000501void CalcPadding(uint32_t inputSize,
502 uint32_t filterSize,
503 uint32_t stride,
504 uint32_t dilation,
505 uint32_t& paddingFront,
506 uint32_t& paddingBack,
507 TfLitePadding padding)
508{
509 paddingFront = 0;
510 paddingBack = 0;
511 if (padding == kTfLitePaddingSame)
512 {
513 uint32_t outputSize = (inputSize + stride - 1) / stride;
514 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
515 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
516 if (temp > inputSize)
517 {
518 paddingFront = (temp - inputSize) / 2;
519 paddingBack = (temp - inputSize) - paddingFront;
520 }
521 }
522}
523
// Adds a Constant layer holding 'tfLiteTensor's data to the network and registers
// its output slot under 'slotIndex' in data.m_OutputSlotForNode so later nodes can
// connect to it. Returns kTfLiteError when no backend supports the constant.
// 'layer' is unused; it is kept only for interface compatibility with callers.
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             const armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    IgnoreUnused(layer);
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    // Wrap the TfLite buffer (throws if the tensor is not kTfLiteMmapRo).
    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo);
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    constantLayer->SetBackendId(setBackend);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    // Register the constant as the producer for this tensor slot.
    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000557
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100558bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
559{
Mike Kelly84d63782022-05-06 12:14:16 +0100560 // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
561 // less then the input is not present.
562 if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
563 {
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100564 return true;
565 }
566 return false;
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100567}
568
// For every input of 'tfLiteNode' that is a TfLite constant tensor, adds a
// Constant layer to the network and registers its output slot under the input's
// tensor index in delegateData.m_OutputSlotForNode, so a subsequent Connect()
// can wire it into 'layer'. Non-constant inputs are left untouched.
// Returns kTfLiteError when some backend check fails for a constant input.
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If input tensor is a Constant tensor create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            // Wrap the TfLite buffer (throws if the tensor is not kTfLiteMmapRo).
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo);
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            // Register the constant as the producer for this input tensor.
            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}
608
Matthew Sloyand30bfb52021-04-18 16:40:00 +0100609unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
610{
611 int numDims = armnn::numeric_cast<int>(numDimensions);
612 int wrappedIndex = index < 0 ? numDims + index : index;
613 ARMNN_ASSERT(wrappedIndex >= 0);
614 ARMNN_ASSERT(wrappedIndex < numDims);
615
616 return static_cast<unsigned int>(wrappedIndex);
617};
618
Jim Flynn4b2f3472021-10-13 21:20:07 +0100619bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
620 const armnn::TensorInfo& inputInfo2,
621 const armnn::TensorInfo& outputInfo)
622{
623 return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
624 (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
625 (armnn::DataType::Signed32 == outputInfo.GetDataType());
626}
627
Sadik Armagan90a119b2022-08-05 16:12:49 +0100628void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
629{
630 // If input tensor info is constant and output tensor info shape is not specified
631 // set the output shape from input shape
632 if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
633 {
634 outputInfo.SetShape(inputInfo.GetShape());
635 }
636 return;
637}
638
Sadik Armagan62483be2020-10-23 17:14:43 +0100639} // namespace anonymous