blob: 58d8048be36786899a63af50a46aca26ae950d7d [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Sadik Armagan90a119b2022-08-05 16:12:49 +01002// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
#pragma once

#include <armnn_delegate.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>
23
Sadik Armagan62483be2020-10-23 17:14:43 +010024namespace
25{
26
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support.
// Iterates over 'backends' and sets 'supported' to true at the first backend whose layer-support
// object accepts the query ('func' called with __VA_ARGS__). For each backend that rejects the
// layer, a warning is logged (including the backend's reason string when one was provided); a
// backend that is not registered at all is reported via TF_LITE_KERNEL_LOG. If no backend
// supports the layer, a final error is logged. An InvalidArgumentException raised by the
// support check is re-thrown with the call-site location attached.
// (Comments cannot be placed inside the macro body: '//' would swallow the line-continuation
// backslashes.)
#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
71
72TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
73 TfLiteNode* tfLiteNode,
74 const unsigned int expectedSize,
75 int nodeIndex)
76{
77 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000078 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010079 {
80 TF_LITE_MAYBE_KERNEL_LOG(
81 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
82 numInputs, expectedSize, nodeIndex);
83 return kTfLiteError;
84 }
85 return kTfLiteOk;
86}
87
88TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
89 TfLiteNode* tfLiteNode,
90 const unsigned int expectedSize,
91 int nodeIndex)
92{
93 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000094 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010095 {
96 TF_LITE_MAYBE_KERNEL_LOG(
97 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
98 numOutputs, expectedSize, nodeIndex);
99 return kTfLiteError;
100 }
101 return kTfLiteOk;
102}
103
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000104bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
105{
106 auto tensorAllocationType = tfLiteTensor.allocation_type;
107 if (tensorAllocationType == kTfLiteDynamic)
108 {
109 return true;
110 }
111 return false;
112}
113
Sadik Armagan6e36a642020-11-10 21:18:41 +0000114bool IsValid(const TfLiteTensor* tfLiteTensor)
115{
116 return tfLiteTensor == nullptr ? false : true;
117}
118
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000119bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
120{
121 if(!IsValid(&tfLiteTensor))
122 {
123 std::cout << "..Is Not Valid" << std::endl;
124 TF_LITE_MAYBE_KERNEL_LOG(
125 tfLiteContext,
126 "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
127 operatorCode, nodeIndex);
128 return false;
129 }
130 if (IsDynamicTensor(tfLiteTensor))
131 {
132 std::cout << "..IsDynamicTensor" << std::endl;
133 TF_LITE_MAYBE_KERNEL_LOG(
134 tfLiteContext,
135 "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
136 operatorCode, nodeIndex);
137 return false;
138 }
139 return true;
140}
141
Sadik Armagan32ca1442020-11-13 17:51:56 +0000142uint32_t NonNegative(int32_t value, int nodeIndex)
143{
144 if (value < 0)
145 {
Keith Davis892fafe2020-11-26 17:40:35 +0000146 throw armnn::Exception(
147 "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000148 }
149 else
150 {
151 return static_cast<uint32_t>(value);
152 }
153}
154
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000155bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
156{
157 auto quantizationInfo = tfLiteTensor.quantization;
158 if (quantizationInfo.type == kTfLiteAffineQuantization)
159 {
160 return true;
161 }
162 return false;
163}
164
Sadik Armagan67e95f22020-10-29 16:14:54 +0000165TfLiteStatus Connect(armnn::IConnectableLayer* layer,
166 TfLiteNode* tfLiteNode,
167 armnnDelegate::DelegateData& data)
168{
Keith Davis892fafe2020-11-26 17:40:35 +0000169 ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000170
171 // Connect the input slots
172 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
173 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000174 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
175 {
176 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
177 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000178 }
179
180 // Prepare output slots
181 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
182 {
183 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000184 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000185 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000186
Sadik Armagan67e95f22020-10-29 16:14:54 +0000187 return kTfLiteOk;
188}
189
// Enables broadcasting for binary element-wise layers whose two inputs have different ranks:
// the lower-rank input is reshaped to the higher rank by prepending size-1 dimensions, then
// both inputs and the start layer's outputs are connected.
// Returns the start layer when the ranks already match (after connecting it), the inserted
// Reshape layer otherwise, or nullptr when connection fails or no backend supports the Reshape.
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    // Same rank: no reshape needed, connect the start layer directly.
    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    // Left-pad the smaller shape with 1s up to the bigger rank, e.g. [H, W] -> [1, 1, H, W].
    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Route the smaller input through the Reshape layer into its slot of the start layer;
    // the other input connects to the start layer directly.
    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots: downstream nodes consume the start layer's outputs.
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}
273
// Appends an ArmNN Activation layer implementing the TfLite fused activation 'activationType'
// after 'prevLayer', and re-registers the node's output slots to the activation layer so that
// downstream nodes consume the activated values.
// Returns kTfLiteOk immediately for kTfLiteActNone; kTfLiteError for activation types without
// an ArmNN mapping or when no backend supports the activation.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            // BoundedReLu with upper bound m_A = 1 and lower bound m_B = -1, i.e. clamp to [-1, 1].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            // BoundedReLu clamping to [0, 6].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            // Any other fused activation has no ArmNN equivalent here.
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    // NOTE(review): every registered output slot is connected to the activation layer's input
    // slot 0, and the registration is then redirected to the activation layer's corresponding
    // output slot. Confirm this is intended for layers with more than one output slot.
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
362
Sadik Armagan6e36a642020-11-10 21:18:41 +0000363armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100364{
Sadik Armagan62483be2020-10-23 17:14:43 +0100365 switch (tfLiteTensor.type)
366 {
367 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000368 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100369 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000370 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100371 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000372 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100373 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000374 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100375 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000376 {
377 auto quantizationInfo = tfLiteTensor.quantization;
378 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000379 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000380 auto* quantization =
381 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
382 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
383 {
384 return armnn::DataType::QAsymmS8;
385 }
386 else
387 {
388 return armnn::DataType::QSymmS8;
389 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000390 }
391 else
392 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000393 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000394 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000395 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100396 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000397 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100398 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000399 return armnn::DataType::Signed32;
Mike Kelly1f140f72021-04-06 12:25:55 +0100400 case kTfLiteInt64:
401 return armnn::DataType::Signed64;
Sadik Armagan62483be2020-10-23 17:14:43 +0100402 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000403 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100404 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000405}
Sadik Armagan62483be2020-10-23 17:14:43 +0100406
Sadik Armagan90a119b2022-08-05 16:12:49 +0100407armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000408{
409 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100410 armnn::TensorInfo ret;
411 auto tensorDimensionSize = tfLiteTensor.dims->size;
412 if (tensorDimensionSize == 0)
413 {
Sadik Armagan90a119b2022-08-05 16:12:49 +0100414 // If input tensor does not have a shape
415 // assuming that it has 1D tensor
416 if (!isOutput)
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000417 {
418 std::vector<unsigned int> safeShape = { 1 };
419 bool dimensionsSpecificity[1] = { true };
420 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
421 safeShape.data(),
422 dimensionsSpecificity);
423 ret = armnn::TensorInfo(tensorShape, type);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100424 if(tflite::IsConstantTensor(&tfLiteTensor))
425 {
426 ret.SetConstant(true);
427 }
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000428 }
429 else
430 {
431 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
432 ret = armnn::TensorInfo(tensorShape, type);
433 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100434 }
435 else
436 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000437 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100438 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000439 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100440 auto dim = tfLiteTensor.dims->data[i];
441 if (dim == 0)
442 {
443 dimensionsSpecificity[i] = false;
444 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000445 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100446 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000447 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
448 tensorDims.data(),
449 dimensionsSpecificity);
Cathal Corbett5b8093c2021-10-22 11:12:07 +0100450
451 if(tflite::IsConstantTensor(&tfLiteTensor))
452 {
453 ret = armnn::TensorInfo(tensorShape, type);
454 ret.SetConstant(true);
455 }
456 else
457 {
458 ret = armnn::TensorInfo(tensorShape, type);
459 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100460 }
461
462 auto quantizationInfo = tfLiteTensor.quantization;
463 if (quantizationInfo.type == kTfLiteAffineQuantization)
464 {
465 // get per-channel quantization parameters
466 const auto* affineQuantization =
467 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000468 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100469 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000470 std::vector<float> quantizationScales;
Finn Williamsf806c4d2021-02-22 15:13:12 +0000471 for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000472 {
473 quantizationScales.push_back(affineQuantization->scale->data[i]);
474 }
475 ret.SetQuantizationScales(quantizationScales);
Jan Eilers7612bd62021-04-06 17:29:03 +0100476 ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
Sadik Armagan62483be2020-10-23 17:14:43 +0100477 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000478 else
479 {
480 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
481 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
482 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100483 }
484 else
485 {
486 auto quantizationParameters = tfLiteTensor.params;
487 ret.SetQuantizationScale(quantizationParameters.scale);
488 ret.SetQuantizationOffset(quantizationParameters.zero_point);
489 }
490
491 return ret;
492}
493
Sadik Armagan4189cc52020-11-11 18:01:48 +0000494armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
495 armnn::TensorInfo& tensorInfo,
Jan Eilers53ef7952021-06-02 12:01:25 +0100496 armnn::Optional<armnn::PermutationVector&>
497 permutationVector = armnn::EmptyOptional(),
Sadik Armagan32ca1442020-11-13 17:51:56 +0000498 void* permutationData = nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000499{
Sadik Armagan4189cc52020-11-11 18:01:48 +0000500 if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
501 {
Keith Davis892fafe2020-11-26 17:40:35 +0000502 throw armnn::Exception(
503 "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
Sadik Armagan4189cc52020-11-11 18:01:48 +0000504 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000505
Matthew Sloyan81beae32021-07-13 19:46:11 +0100506 if(tflite::IsConstantTensor(tfLiteTensor))
507 {
508 tensorInfo.SetConstant();
509 }
510
Sadik Armagan32ca1442020-11-13 17:51:56 +0000511 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000512 {
Jan Eilers7612bd62021-04-06 17:29:03 +0100513 // Permute tensor info
514 tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
515 // then permute data using the shape from permuted tensor info
516 armnnUtils::Permute(tensorInfo.GetShape(),
Sadik Armagan6e36a642020-11-10 21:18:41 +0000517 permutationVector.value(),
Sadik Armagan4189cc52020-11-11 18:01:48 +0000518 tfLiteTensor->data.data,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000519 permutationData,
Sadik Armagan4189cc52020-11-11 18:01:48 +0000520 armnn::GetDataTypeSize(tensorInfo.GetDataType()));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000521
Jan Eilers7612bd62021-04-06 17:29:03 +0100522 return armnn::ConstTensor(tensorInfo, permutationData);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000523 }
524 else
525 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000526 return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000527 }
528}
529
// Creates a heap-allocated ConstTensor describing the node's input tensor at 'index'.
// NOTE(review): returns an owning raw pointer - the caller is responsible for deleting it;
// consider returning by value or via std::unique_ptr. The returned tensor references the
// TfLite tensor's data buffer, so the TfLite tensor must outlive the returned object.
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}
536
Sadik Armagan32ca1442020-11-13 17:51:56 +0000537void CalcPadding(uint32_t inputSize,
538 uint32_t filterSize,
539 uint32_t stride,
540 uint32_t dilation,
541 uint32_t& paddingFront,
542 uint32_t& paddingBack,
543 TfLitePadding padding)
544{
545 paddingFront = 0;
546 paddingBack = 0;
547 if (padding == kTfLitePaddingSame)
548 {
549 uint32_t outputSize = (inputSize + stride - 1) / stride;
550 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
551 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
552 if (temp > inputSize)
553 {
554 paddingFront = (temp - inputSize) / 2;
555 paddingBack = (temp - inputSize) - paddingFront;
556 }
557 }
558}
559
// Adds a Constant layer holding 'tfLiteTensor' to the network and registers its output slot
// at 'slotIndex' in data.m_OutputSlotForNode, so downstream nodes can consume the constant.
// Returns kTfLiteError when no backend supports the Constant layer.
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    // 'layer' is not used; the constant's output slot is registered through 'data' instead.
    IgnoreUnused(layer);
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    // Wrap the TfLite tensor data (no permutation) and add the Constant layer.
    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000591
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100592bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
593{
Mike Kelly84d63782022-05-06 12:14:16 +0100594 // If the inputs array has fewer than operandIndex entries or if the entry at operandIndex has a value of -1 or
595 // less then the input is not present.
596 if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
597 {
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100598 return true;
599 }
600 return false;
Narumol Prangnawarat7684b182021-08-12 14:48:15 +0100601}
602
// For each of the layer's inputs whose TfLite tensor is constant, creates an ArmNN Constant
// layer holding the tensor data and registers its output slot in
// delegateData.m_OutputSlotForNode; non-constant inputs are left untouched.
// Returns kTfLiteError when a required Constant layer is not supported by any backend.
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If input tensor is a Constant tensor create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            // Wrap the TfLite tensor data (no permutation) and add the Constant layer.
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            // Register the constant's output slot under the input tensor's index.
            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}
640
Matthew Sloyand30bfb52021-04-18 16:40:00 +0100641unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
642{
643 int numDims = armnn::numeric_cast<int>(numDimensions);
644 int wrappedIndex = index < 0 ? numDims + index : index;
645 ARMNN_ASSERT(wrappedIndex >= 0);
646 ARMNN_ASSERT(wrappedIndex < numDims);
647
648 return static_cast<unsigned int>(wrappedIndex);
649};
650
Jim Flynn4b2f3472021-10-13 21:20:07 +0100651bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
652 const armnn::TensorInfo& inputInfo2,
653 const armnn::TensorInfo& outputInfo)
654{
655 return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
656 (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
657 (armnn::DataType::Signed32 == outputInfo.GetDataType());
658}
659
Sadik Armagan90a119b2022-08-05 16:12:49 +0100660void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
661{
662 // If input tensor info is constant and output tensor info shape is not specified
663 // set the output shape from input shape
664 if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
665 {
666 outputInfo.SetShape(inputInfo.GetShape());
667 }
668 return;
669}
670
Sadik Armagan62483be2020-10-23 17:14:43 +0100671} // namespace anonymous