//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <memory>
#include <string>

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
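//
// A minimal usage sketch (mirroring the IsReshapeSupported query made in BroadcastTensor below);
// the tensor infos and descriptor come from whatever operator is being checked:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                tfLiteContext,
//                                IsReshapeSupported,
//                                delegateData.m_Backends,
//                                isSupported,
//                                inputTensorInfo,
//                                reshapedTensorInfo,
//                                reshapeDescriptor);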

TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.allocation_type == kTfLiteDynamic;
}

bool IsValid(const TfLiteTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
{
    if (!IsValid(&tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    return true;
}

uint32_t NonNegative(int32_t value, int nodeIndex)
{
    if (value < 0)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Negative value found in node " + std::to_string(nodeIndex));
    }
    return static_cast<uint32_t>(value);
}

bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.quantization.type == kTfLiteAffineQuantization;
}

TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());

    // Connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

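// Broadcast helper: when the two inputs already have the same rank the start layer is connected
// directly; otherwise a Reshape layer is inserted that left-pads the lower-rank input's shape
// with 1s so the ranks match. For example, inputs of shape [4, 1, 2] and [2] cause the second
// input to be reshaped to [1, 1, 2]. Returns nullptr if the required Reshape is unsupported.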
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // Set the target shape before querying backend support so the check sees the real descriptor.
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}

TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActRelu1:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

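// Maps a TfLite tensor type onto the equivalent armnn::DataType. Note the kTfLiteInt8 case:
// affine quantization with a single zero point maps to QAsymmS8, per-channel quantization
// parameters map to QSymmS8, and a tensor without affine quantization maps to QAsymmS8.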
armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
{
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = tfLiteTensor.quantization;
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(static_cast<int>(tfLiteTensor.type)));
    }
}

armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        if (tflite::IsConstantTensor(&tfLiteTensor))
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };
            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        auto numDims = static_cast<unsigned int>(tensorDimensionSize);
        std::vector<unsigned int> tensorDims(numDims);
        // Size the specificity buffer from the actual rank rather than a fixed-size array,
        // so tensors with more dimensions cannot write out of bounds.
        std::unique_ptr<bool[]> dimensionsSpecificity = std::make_unique<bool[]>(numDims);
        for (unsigned int i = 0; i < numDims; ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            // A dimension of 0 means its size is not specified.
            dimensionsSpecificity[i] = (dim != 0);
            tensorDims[i] = static_cast<unsigned int>(dim);
        }
        armnn::TensorShape tensorShape(numDims,
                                       tensorDims.data(),
                                       dimensionsSpecificity.get());
        ret = armnn::TensorInfo(tensorShape, type);
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = tfLiteTensor.params;
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

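// Wraps a constant (kTfLiteMmapRo) TfLite tensor in an armnn::ConstTensor. If a non-empty
// permutation vector and a scratch buffer are supplied, the tensor info and data are permuted
// first and the returned ConstTensor points at the permuted copy held in permutationData.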
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     armnn::TensorInfo& tensorInfo,
                                     armnn::Optional<armnn::PermutationVector&> permutationVector
                                         = armnn::EmptyOptional(),
                                     void* permutationData = nullptr)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
    }

    if (tflite::IsConstantTensor(tfLiteTensor))
    {
        tensorInfo.SetConstant();
    }

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
    {
        // Permute tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        // then permute data using the shape from permuted tensor info
        armnnUtils::Permute(tensorInfo.GetShape(),
                            permutationVector.value(),
                            tfLiteTensor->data.data,
                            permutationData,
                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));

        return armnn::ConstTensor(tensorInfo, permutationData);
    }
    else
    {
        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
    }
}

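// Note: returns a heap-allocated ConstTensor that wraps the TfLite tensor's own data buffer;
// the caller takes ownership and is responsible for deleting it.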
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor& tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}

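// Computes the front/back padding for one spatial dimension, following the TfLite convention.
// Worked example for kTfLitePaddingSame: inputSize=5, filterSize=3, stride=2, dilation=1 gives
// outputSize=3, dilatedSize=3, temp=7, hence paddingFront=1 and paddingBack=1.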
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 TfLitePadding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == kTfLitePaddingSame)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    IgnoreUnused(layer);
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}

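// TfLite marks an omitted optional input with a negative tensor index (kTfLiteOptionalTensor).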
bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
{
    // A negative index means the optional operand was omitted from the model.
    if (tfLiteNode->inputs->data[operandIndex] < 0)
    {
        return false;
    }
    return true;
}

TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If input tensor is a Constant tensor create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

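// Wraps a possibly negative axis index into the range [0, numDimensions),
// e.g. index -1 with numDimensions 4 yields 3.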
unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
{
    int numDims = armnn::numeric_cast<int>(numDimensions);
    int wrappedIndex = index < 0 ? numDims + index : index;
    ARMNN_ASSERT(wrappedIndex >= 0);
    ARMNN_ASSERT(wrappedIndex < numDims);

    return static_cast<unsigned int>(wrappedIndex);
}

} // namespace anonymous