//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
6#pragma once
7
Matthew Sloyan81ec9942021-10-12 10:26:30 +01008#include <armnn_delegate.hpp>
9
Sadik Armagan62483be2020-10-23 17:14:43 +010010#include <armnn/ArmNN.hpp>
11#include <armnn/BackendHelper.hpp>
12#include <armnn/utility/Assert.hpp>
Sadik Armagan67e95f22020-10-29 16:14:54 +000013#include <armnn/utility/NumericCast.hpp>
Sadik Armagan62483be2020-10-23 17:14:43 +010014
Sadik Armagan6e36a642020-11-10 21:18:41 +000015#include <armnnUtils/Permute.hpp>
16
Sadik Armagan62483be2020-10-23 17:14:43 +010017#include <tensorflow/lite/builtin_ops.h>
18#include <tensorflow/lite/c/builtin_op_data.h>
19#include <tensorflow/lite/c/common.h>
20#include <tensorflow/lite/minimal_logging.h>
21
Sadik Armagan05e9fd22020-11-17 12:01:47 +000022#include "tensorflow/lite/kernels/kernel_util.h"
23
Sadik Armagan62483be2020-10-23 17:14:43 +010024namespace
25{
26
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
//
// Iterates over every backend in 'backends', fetching its layer-support object
// and invoking 'func' (an Is<Layer>Supported member) with the given arguments.
// Sets 'supported' to true and stops at the first backend that accepts the
// layer. When a backend rejects the layer, the reason string (if any) is
// logged as a TfLite warning tagged with 'opName'; an unregistered backend and
// an all-backends failure are reported via TF_LITE_KERNEL_LOG. Any
// armnn::InvalidArgumentException thrown by the support query is re-thrown
// with this call site's location attached.
#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
71
72TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
73 TfLiteNode* tfLiteNode,
74 const unsigned int expectedSize,
75 int nodeIndex)
76{
77 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000078 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010079 {
80 TF_LITE_MAYBE_KERNEL_LOG(
81 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
82 numInputs, expectedSize, nodeIndex);
83 return kTfLiteError;
84 }
85 return kTfLiteOk;
86}
87
88TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
89 TfLiteNode* tfLiteNode,
90 const unsigned int expectedSize,
91 int nodeIndex)
92{
93 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000094 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010095 {
96 TF_LITE_MAYBE_KERNEL_LOG(
97 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
98 numOutputs, expectedSize, nodeIndex);
99 return kTfLiteError;
100 }
101 return kTfLiteOk;
102}
103
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000104bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
105{
106 auto tensorAllocationType = tfLiteTensor.allocation_type;
107 if (tensorAllocationType == kTfLiteDynamic)
108 {
109 return true;
110 }
111 return false;
112}
113
Sadik Armagan6e36a642020-11-10 21:18:41 +0000114bool IsValid(const TfLiteTensor* tfLiteTensor)
115{
116 return tfLiteTensor == nullptr ? false : true;
117}
118
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000119bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
120{
121 if(!IsValid(&tfLiteTensor))
122 {
123 std::cout << "..Is Not Valid" << std::endl;
124 TF_LITE_MAYBE_KERNEL_LOG(
125 tfLiteContext,
126 "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
127 operatorCode, nodeIndex);
128 return false;
129 }
130 if (IsDynamicTensor(tfLiteTensor))
131 {
132 std::cout << "..IsDynamicTensor" << std::endl;
133 TF_LITE_MAYBE_KERNEL_LOG(
134 tfLiteContext,
135 "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
136 operatorCode, nodeIndex);
137 return false;
138 }
139 return true;
140}
141
Sadik Armagan32ca1442020-11-13 17:51:56 +0000142uint32_t NonNegative(int32_t value, int nodeIndex)
143{
144 if (value < 0)
145 {
Keith Davis892fafe2020-11-26 17:40:35 +0000146 throw armnn::Exception(
147 "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000148 }
149 else
150 {
151 return static_cast<uint32_t>(value);
152 }
153}
154
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000155bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
156{
157 auto quantizationInfo = tfLiteTensor.quantization;
158 if (quantizationInfo.type == kTfLiteAffineQuantization)
159 {
160 return true;
161 }
162 return false;
163}
164
Sadik Armagan67e95f22020-10-29 16:14:54 +0000165TfLiteStatus Connect(armnn::IConnectableLayer* layer,
166 TfLiteNode* tfLiteNode,
167 armnnDelegate::DelegateData& data)
168{
Keith Davis892fafe2020-11-26 17:40:35 +0000169 ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000170
171 // Connect the input slots
172 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
173 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000174 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
175 {
176 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
177 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000178 }
179
180 // Prepare output slots
181 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
182 {
183 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000184 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000185 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000186
Sadik Armagan67e95f22020-10-29 16:14:54 +0000187 return kTfLiteOk;
188}
189
// Handles broadcasting between two inputs of differing rank by inserting a
// Reshape layer in front of the lower-rank input, expanding it with leading
// 1-dimensions to match the higher rank. When the ranks already match, it
// simply connects the node via Connect(). Returns the last layer added
// (startLayer or the reshape layer), or nullptr on failure.
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    // Equal ranks: no reshape needed, connect directly.
    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    // Build the broadcast shape: leading dims are 1, trailing dims copy the
    // smaller input's shape.
    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
    // Bail out (nullptr) if no backend supports the reshape.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Route the smaller input through the reshape layer and the larger input
    // straight into startLayer; slot indices mirror which input was smaller.
    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}
273
// Appends an ArmNN Activation layer implementing the TfLite fused activation
// 'activationType' after 'prevLayer', rewiring the node's registered output
// slots to point at the activation layer. Returns kTfLiteOk immediately for
// kTfLiteActNone, kTfLiteError for unsupported activation types or when no
// backend supports the activation.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    // Translate the TfLite fused activation into an ArmNN activation
    // descriptor (BoundedReLu encodes both relu1 and relu6 via m_A/m_B).
    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    // The slot currently registered for each node output is prevLayer's
    // output; connect it into the activation, then re-register the
    // activation's output slot in its place.
    // NOTE(review): every iteration connects into GetInputSlot(0) of the
    // activation layer — presumably activation layers are single-input /
    // single-output so the loop runs once; confirm for multi-output callers.
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
362
Sadik Armagan6e36a642020-11-10 21:18:41 +0000363armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100364{
Sadik Armagan62483be2020-10-23 17:14:43 +0100365 switch (tfLiteTensor.type)
366 {
367 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000368 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100369 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000370 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100371 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000372 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100373 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000374 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100375 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000376 {
377 auto quantizationInfo = tfLiteTensor.quantization;
378 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000379 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000380 auto* quantization =
381 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
382 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
383 {
384 return armnn::DataType::QAsymmS8;
385 }
386 else
387 {
388 return armnn::DataType::QSymmS8;
389 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000390 }
391 else
392 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000393 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000394 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000395 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100396 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000397 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100398 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000399 return armnn::DataType::Signed32;
Mike Kelly1f140f72021-04-06 12:25:55 +0100400 case kTfLiteInt64:
401 return armnn::DataType::Signed64;
Sadik Armagan62483be2020-10-23 17:14:43 +0100402 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000403 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100404 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000405}
Sadik Armagan62483be2020-10-23 17:14:43 +0100406
Jan Eilers7612bd62021-04-06 17:29:03 +0100407armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000408{
409 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100410 armnn::TensorInfo ret;
411 auto tensorDimensionSize = tfLiteTensor.dims->size;
412 if (tensorDimensionSize == 0)
413 {
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000414 if(tflite::IsConstantTensor(&tfLiteTensor))
415 {
416 std::vector<unsigned int> safeShape = { 1 };
417 bool dimensionsSpecificity[1] = { true };
418 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
419 safeShape.data(),
420 dimensionsSpecificity);
421 ret = armnn::TensorInfo(tensorShape, type);
Cathal Corbett5b8093c2021-10-22 11:12:07 +0100422 ret.SetConstant(true);
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000423 }
424 else
425 {
426 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
427 ret = armnn::TensorInfo(tensorShape, type);
428 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100429 }
430 else
431 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000432 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100433 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000434 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100435 auto dim = tfLiteTensor.dims->data[i];
436 if (dim == 0)
437 {
438 dimensionsSpecificity[i] = false;
439 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000440 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100441 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000442 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
443 tensorDims.data(),
444 dimensionsSpecificity);
Cathal Corbett5b8093c2021-10-22 11:12:07 +0100445
446 if(tflite::IsConstantTensor(&tfLiteTensor))
447 {
448 ret = armnn::TensorInfo(tensorShape, type);
449 ret.SetConstant(true);
450 }
451 else
452 {
453 ret = armnn::TensorInfo(tensorShape, type);
454 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100455 }
456
457 auto quantizationInfo = tfLiteTensor.quantization;
458 if (quantizationInfo.type == kTfLiteAffineQuantization)
459 {
460 // get per-channel quantization parameters
461 const auto* affineQuantization =
462 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000463 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100464 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000465 std::vector<float> quantizationScales;
Finn Williamsf806c4d2021-02-22 15:13:12 +0000466 for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000467 {
468 quantizationScales.push_back(affineQuantization->scale->data[i]);
469 }
470 ret.SetQuantizationScales(quantizationScales);
Jan Eilers7612bd62021-04-06 17:29:03 +0100471 ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
Sadik Armagan62483be2020-10-23 17:14:43 +0100472 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000473 else
474 {
475 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
476 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
477 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100478 }
479 else
480 {
481 auto quantizationParameters = tfLiteTensor.params;
482 ret.SetQuantizationScale(quantizationParameters.scale);
483 ret.SetQuantizationOffset(quantizationParameters.zero_point);
484 }
485
486 return ret;
487}
488
// Wraps a constant (kTfLiteMmapRo) TfLite tensor in an armnn::ConstTensor.
// When a non-empty permutation vector AND a caller-provided 'permutationData'
// buffer are given, the tensor info is permuted in place and the tensor data
// is permuted into that buffer, which the resulting ConstTensor then aliases;
// otherwise the ConstTensor aliases the TfLite tensor's own buffer.
// Throws armnn::Exception when the tensor is not a constant allocation.
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     armnn::TensorInfo& tensorInfo,
                                     armnn::Optional<armnn::PermutationVector&>
                                         permutationVector = armnn::EmptyOptional(),
                                     void* permutationData = nullptr)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
    }

    if(tflite::IsConstantTensor(tfLiteTensor))
    {
        tensorInfo.SetConstant();
    }

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
    {
        // Permute tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        // then permute data using the shape from permuted tensor info
        armnnUtils::Permute(tensorInfo.GetShape(),
                            permutationVector.value(),
                            tfLiteTensor->data.data,
                            permutationData,
                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));

        return armnn::ConstTensor(tensorInfo, permutationData);
    }
    else
    {
        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
    }
}
524
// Builds a ConstTensor describing the node's input tensor at 'index'.
// NOTE(review): the result is heap-allocated with raw `new` — ownership
// passes to the caller, which must delete it (or wrap it in a smart pointer)
// to avoid a leak. The ConstTensor also aliases the TfLite tensor's data
// buffer, so it must not outlive the interpreter's tensors.
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor &tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}
531
Sadik Armagan32ca1442020-11-13 17:51:56 +0000532void CalcPadding(uint32_t inputSize,
533 uint32_t filterSize,
534 uint32_t stride,
535 uint32_t dilation,
536 uint32_t& paddingFront,
537 uint32_t& paddingBack,
538 TfLitePadding padding)
539{
540 paddingFront = 0;
541 paddingBack = 0;
542 if (padding == kTfLitePaddingSame)
543 {
544 uint32_t outputSize = (inputSize + stride - 1) / stride;
545 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
546 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
547 if (temp > inputSize)
548 {
549 paddingFront = (temp - inputSize) / 2;
550 paddingBack = (temp - inputSize) - paddingFront;
551 }
552 }
553}
554
// Creates a Constant layer for the given constant TfLite tensor and registers
// its output slot under 'slotIndex' so later nodes can connect to it.
// Returns kTfLiteError when no backend supports the constant.
// The 'layer' parameter is unused (kept for interface symmetry with callers).
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    IgnoreUnused(layer);
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000586
// NOTE(review): despite its name, this returns true when the optional operand
// is ABSENT — TfLite marks an omitted optional input with a negative tensor
// index — and false when it is present. Callers appear to depend on these
// inverted semantics, so only the documentation is corrected here; consider
// renaming (or inverting) in a coordinated change.
bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
{
    if (tfLiteNode->inputs->data[operandIndex] < 0) {
        return true;
    }
    return false;

}
595
// For each input slot of 'layer' whose TfLite tensor is constant, creates a
// Constant layer wrapping that tensor's data and registers the constant
// layer's output slot for the input tensor index, so Connect() can later wire
// it into the layer. Non-constant inputs are left untouched. Returns
// kTfLiteError when a backend rejects one of the constants.
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If input tensor is a Constant tensor create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            // Register under the input tensor's index so Connect() picks it up.
            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}
633
Matthew Sloyand30bfb52021-04-18 16:40:00 +0100634unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
635{
636 int numDims = armnn::numeric_cast<int>(numDimensions);
637 int wrappedIndex = index < 0 ? numDims + index : index;
638 ARMNN_ASSERT(wrappedIndex >= 0);
639 ARMNN_ASSERT(wrappedIndex < numDims);
640
641 return static_cast<unsigned int>(wrappedIndex);
642};
643
Jim Flynn4b2f3472021-10-13 21:20:07 +0100644bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
645 const armnn::TensorInfo& inputInfo2,
646 const armnn::TensorInfo& outputInfo)
647{
648 return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
649 (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
650 (armnn::DataType::Signed32 == outputInfo.GetDataType());
651}
652
Sadik Armagan62483be2020-10-23 17:14:43 +0100653} // namespace anonymous