blob: 3e74225b1542c4af83bf8f458688ea1ad2308a9d [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Mike Kelly04d82292023-01-19 18:29:40 +00002// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
#include <armnn_delegate.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>
23
Sadik Armagan62483be2020-10-23 17:14:43 +010024namespace
25{
26
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support.
//
// Iterates over the delegate's preferred 'backends'; the first registered
// backend whose layer-support object accepts the query wins: 'supported' is set
// to true and 'setBackend' records the winning backend id. A rejecting backend
// has its reason (when provided) logged as a TfLite warning; an unregistered
// backend is reported through the TfLite context. If no backend accepts, a
// final "not supported by any specified backend" message is logged.
// An armnn::InvalidArgumentException raised by the support query is rethrown
// with added message and source location.
//
// NOTE: comments cannot appear inside the macro body — line splicing happens
// before comment processing, so a '//' before a trailing backslash would
// comment out the following macro line.
#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
72
73TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
74 TfLiteNode* tfLiteNode,
75 const unsigned int expectedSize,
76 int nodeIndex)
77{
78 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000079 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010080 {
81 TF_LITE_MAYBE_KERNEL_LOG(
82 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
83 numInputs, expectedSize, nodeIndex);
84 return kTfLiteError;
85 }
86 return kTfLiteOk;
87}
88
89TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
90 TfLiteNode* tfLiteNode,
91 const unsigned int expectedSize,
92 int nodeIndex)
93{
94 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000095 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010096 {
97 TF_LITE_MAYBE_KERNEL_LOG(
98 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
99 numOutputs, expectedSize, nodeIndex);
100 return kTfLiteError;
101 }
102 return kTfLiteOk;
103}
104
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000105bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
106{
107 auto tensorAllocationType = tfLiteTensor.allocation_type;
108 if (tensorAllocationType == kTfLiteDynamic)
109 {
110 return true;
111 }
112 return false;
113}
114
Sadik Armagan6e36a642020-11-10 21:18:41 +0000115bool IsValid(const TfLiteTensor* tfLiteTensor)
116{
117 return tfLiteTensor == nullptr ? false : true;
118}
119
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000120bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
121{
122 if(!IsValid(&tfLiteTensor))
123 {
124 std::cout << "..Is Not Valid" << std::endl;
125 TF_LITE_MAYBE_KERNEL_LOG(
126 tfLiteContext,
127 "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
128 operatorCode, nodeIndex);
129 return false;
130 }
131 if (IsDynamicTensor(tfLiteTensor))
132 {
133 std::cout << "..IsDynamicTensor" << std::endl;
134 TF_LITE_MAYBE_KERNEL_LOG(
135 tfLiteContext,
136 "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
137 operatorCode, nodeIndex);
138 return false;
139 }
140 return true;
141}
142
Sadik Armagan32ca1442020-11-13 17:51:56 +0000143uint32_t NonNegative(int32_t value, int nodeIndex)
144{
145 if (value < 0)
146 {
Keith Davis892fafe2020-11-26 17:40:35 +0000147 throw armnn::Exception(
148 "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000149 }
150 else
151 {
152 return static_cast<uint32_t>(value);
153 }
154}
155
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000156bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
157{
158 auto quantizationInfo = tfLiteTensor.quantization;
159 if (quantizationInfo.type == kTfLiteAffineQuantization)
160 {
161 return true;
162 }
163 return false;
164}
165
Sadik Armagan67e95f22020-10-29 16:14:54 +0000166TfLiteStatus Connect(armnn::IConnectableLayer* layer,
167 TfLiteNode* tfLiteNode,
168 armnnDelegate::DelegateData& data)
169{
Keith Davis892fafe2020-11-26 17:40:35 +0000170 ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000171
172 // Connect the input slots
173 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
174 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000175 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
176 {
177 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
178 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000179 }
180
181 // Prepare output slots
182 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
183 {
184 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000185 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000186 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000187
Sadik Armagan67e95f22020-10-29 16:14:54 +0000188 return kTfLiteOk;
189}
190
// Handles rank mismatch between the two inputs of a binary layer by inserting
// a Reshape that pads the lower-rank input's shape with leading 1s
// (NumPy-style broadcast alignment), then wires both inputs into 'startLayer'.
// When the ranks already match it simply connects the node and returns
// 'startLayer'. Returns the inserted reshape layer on success, or nullptr when
// the reshape is unsupported / connection fails.
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // Equal rank: no broadcast reshape required.
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    // Left-pad the smaller shape with 1s up to the bigger rank.
    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    reshapeLayer->SetBackendId(setBackend);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Route the smaller input through the reshape and the larger one straight
    // into startLayer, preserving the original input slot order (0, 1).
    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots: downstream nodes consume startLayer's outputs.
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}
277
// Appends an armnn Activation layer implementing the TfLite fused activation
// 'activationType' after output slot 'outputSlotIndex' of 'prevLayer', and
// re-registers the node's output slots so downstream consumers read from the
// activation. Returns kTfLiteOk when no activation is needed (kTfLiteActNone)
// or the layer was added; kTfLiteError for unsupported/unknown activations.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            // Relu1 clamps to [-1, 1]: BoundedReLu with upper bound m_A, lower bound m_B.
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            // Relu6 clamps to [0, 6].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            // Any other fused activation (e.g. sign-bit) is not mapped to armnn.
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               activationOutputInfo,
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
    activationLayer->SetBackendId(setBackend);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots: the slot currently registered for each
    // output tensor belongs to prevLayer — feed it into the activation, then
    // re-register the activation's own slot in its place.
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
369
Mike Kelly04d82292023-01-19 18:29:40 +0000370armnn::IConnectableLayer* AddReshapeLayer(TfLiteContext* tfLiteContext,
371 TfLiteNode* tfLiteNode,
372 armnn::IConnectableLayer* prevLayer,
373 armnn::TensorInfo reshapedOutputTensorInfo,
374 armnn::TensorInfo outputTensorInfo,
375 armnnDelegate::DelegateData& data)
376{
377 armnn::ReshapeDescriptor desc;
378 desc.m_TargetShape = outputTensorInfo.GetShape();
379
380 bool isSupported = false;
381 armnn::BackendId setBackend;
382 FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
383 tfLiteContext,
384 IsReshapeSupported,
385 data.m_Backends,
386 isSupported,
387 setBackend,
388 reshapedOutputTensorInfo,
389 outputTensorInfo,
390 desc);
391
392 if (!isSupported)
393 {
394 return nullptr;
395 }
396
397 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(desc);
398 reshapeLayer->SetBackendId(setBackend);
399 ARMNN_ASSERT(reshapeLayer != nullptr);
400
401 prevLayer->GetOutputSlot(0).SetTensorInfo(reshapedOutputTensorInfo);
402 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
403
404 // Connect and prepare output slots
405 for (unsigned int outputIndex = 0; outputIndex < reshapeLayer->GetNumOutputSlots(); ++outputIndex)
406 {
407 data.m_OutputSlotForNode[static_cast<unsigned long>(
408 tfLiteNode->outputs->data[outputIndex])]->Connect(reshapeLayer->GetInputSlot(0));
409 armnn::IOutputSlot& outputSlot = reshapeLayer->GetOutputSlot(outputIndex);
410 data.m_OutputSlotForNode[static_cast<unsigned long>(
411 tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
412 }
413 return reshapeLayer;
414}
415
Sadik Armagan6e36a642020-11-10 21:18:41 +0000416armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100417{
Sadik Armagan62483be2020-10-23 17:14:43 +0100418 switch (tfLiteTensor.type)
419 {
420 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000421 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100422 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000423 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100424 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000425 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100426 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000427 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100428 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000429 {
430 auto quantizationInfo = tfLiteTensor.quantization;
431 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000432 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000433 auto* quantization =
434 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
435 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
436 {
437 return armnn::DataType::QAsymmS8;
438 }
439 else
440 {
441 return armnn::DataType::QSymmS8;
442 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000443 }
444 else
445 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000446 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000447 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000448 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100449 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000450 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100451 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000452 return armnn::DataType::Signed32;
Mike Kelly1f140f72021-04-06 12:25:55 +0100453 case kTfLiteInt64:
454 return armnn::DataType::Signed64;
Sadik Armagan62483be2020-10-23 17:14:43 +0100455 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000456 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100457 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000458}
Sadik Armagan62483be2020-10-23 17:14:43 +0100459
Sadik Armagan90a119b2022-08-05 16:12:49 +0100460armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000461{
462 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100463 armnn::TensorInfo ret;
464 auto tensorDimensionSize = tfLiteTensor.dims->size;
465 if (tensorDimensionSize == 0)
466 {
Sadik Armagan90a119b2022-08-05 16:12:49 +0100467 // If input tensor does not have a shape
468 // assuming that it has 1D tensor
469 if (!isOutput)
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000470 {
471 std::vector<unsigned int> safeShape = { 1 };
472 bool dimensionsSpecificity[1] = { true };
473 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
474 safeShape.data(),
475 dimensionsSpecificity);
476 ret = armnn::TensorInfo(tensorShape, type);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100477 if(tflite::IsConstantTensor(&tfLiteTensor))
478 {
479 ret.SetConstant(true);
480 }
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000481 }
482 else
483 {
484 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
485 ret = armnn::TensorInfo(tensorShape, type);
486 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100487 }
488 else
489 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000490 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100491 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000492 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100493 auto dim = tfLiteTensor.dims->data[i];
494 if (dim == 0)
495 {
496 dimensionsSpecificity[i] = false;
497 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000498 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100499 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000500 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
501 tensorDims.data(),
502 dimensionsSpecificity);
Cathal Corbett5b8093c2021-10-22 11:12:07 +0100503
504 if(tflite::IsConstantTensor(&tfLiteTensor))
505 {
506 ret = armnn::TensorInfo(tensorShape, type);
507 ret.SetConstant(true);
508 }
509 else
510 {
511 ret = armnn::TensorInfo(tensorShape, type);
512 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100513 }
514
515 auto quantizationInfo = tfLiteTensor.quantization;
516 if (quantizationInfo.type == kTfLiteAffineQuantization)
517 {
518 // get per-channel quantization parameters
519 const auto* affineQuantization =
520 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000521 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100522 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000523 std::vector<float> quantizationScales;
Finn Williamsf806c4d2021-02-22 15:13:12 +0000524 for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000525 {
526 quantizationScales.push_back(affineQuantization->scale->data[i]);
527 }
528 ret.SetQuantizationScales(quantizationScales);
Jan Eilers7612bd62021-04-06 17:29:03 +0100529 ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
Sadik Armagan62483be2020-10-23 17:14:43 +0100530 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000531 else
532 {
533 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
534 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
535 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100536 }
537 else
538 {
539 auto quantizationParameters = tfLiteTensor.params;
540 ret.SetQuantizationScale(quantizationParameters.scale);
541 ret.SetQuantizationOffset(quantizationParameters.zero_point);
542 }
543
544 return ret;
545}
546
Sadik Armagan4189cc52020-11-11 18:01:48 +0000547armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
Ryan OShea4c231de2023-01-17 15:19:20 +0000548 const armnn::TensorInfo& tensorInfo)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000549{
Sadik Armagan4189cc52020-11-11 18:01:48 +0000550 if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
551 {
Keith Davis892fafe2020-11-26 17:40:35 +0000552 throw armnn::Exception(
553 "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
Sadik Armagan4189cc52020-11-11 18:01:48 +0000554 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000555
Ryan OShea4c231de2023-01-17 15:19:20 +0000556 return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000557}
558
// Builds a ConstTensor aliasing the data of the tensor at input slot 'index'
// of the node.
// NOTE(review): returns a heap-allocated ConstTensor via raw 'new' — the
// caller takes ownership and must delete it; verify call sites do so.
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}
565
Sadik Armagan32ca1442020-11-13 17:51:56 +0000566void CalcPadding(uint32_t inputSize,
567 uint32_t filterSize,
568 uint32_t stride,
569 uint32_t dilation,
570 uint32_t& paddingFront,
571 uint32_t& paddingBack,
572 TfLitePadding padding)
573{
574 paddingFront = 0;
575 paddingBack = 0;
576 if (padding == kTfLitePaddingSame)
577 {
578 uint32_t outputSize = (inputSize + stride - 1) / stride;
579 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
580 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
581 if (temp > inputSize)
582 {
583 paddingFront = (temp - inputSize) / 2;
584 paddingBack = (temp - inputSize) - paddingFront;
585 }
586 }
587}
588
// Adds a Constant layer for 'tfLiteTensor' to the network and registers its
// output slot under 'slotIndex' so downstream layers can consume the constant.
// Returns kTfLiteError when no backend supports a constant of this TensorInfo.
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             const armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    // 'layer' is unused; kept for interface compatibility with call sites.
    IgnoreUnused(layer);
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo);
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    constantLayer->SetBackendId(setBackend);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000622
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100623bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
624{
Mike Kelly84d63782022-05-06 12:14:16 +0100625 // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
626 // less then the input is not present.
627 if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
628 {
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100629 return true;
630 }
631 return false;
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100632}
633
// For every input of 'layer' whose TfLite tensor is constant, adds a Constant
// layer to the network and registers its output slot under the input's tensor
// index (a later Connect() then wires it into 'layer'). Non-constant inputs
// are left untouched. Returns kTfLiteError when a constant is unsupported by
// all backends.
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If input tensor is a Constant tensor create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo);
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            // Register under the INPUT tensor index so Connect() finds it.
            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}
673
Matthew Sloyand30bfb52021-04-18 16:40:00 +0100674unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
675{
676 int numDims = armnn::numeric_cast<int>(numDimensions);
677 int wrappedIndex = index < 0 ? numDims + index : index;
678 ARMNN_ASSERT(wrappedIndex >= 0);
679 ARMNN_ASSERT(wrappedIndex < numDims);
680
681 return static_cast<unsigned int>(wrappedIndex);
682};
683
Jim Flynn4b2f3472021-10-13 21:20:07 +0100684bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
685 const armnn::TensorInfo& inputInfo2,
686 const armnn::TensorInfo& outputInfo)
687{
688 return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
689 (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
690 (armnn::DataType::Signed32 == outputInfo.GetDataType());
691}
692
Sadik Armagan90a119b2022-08-05 16:12:49 +0100693void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
694{
695 // If input tensor info is constant and output tensor info shape is not specified
696 // set the output shape from input shape
697 if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
698 {
699 outputInfo.SetShape(inputInfo.GetShape());
700 }
701 return;
702}
703
Sadik Armagan62483be2020-10-23 17:14:43 +0100704} // namespace anonymous