//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/version.h>

#include "tensorflow/lite/kernels/kernel_util.h"

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}

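// Example usage of the macro above (an illustrative sketch only; 'IsAdditionSupported' exists on
// armnn::ILayerSupport, but the surrounding variables are assumed to exist at the call site and
// are not defined in this file):
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                tfLiteContext,
//                                IsAdditionSupported,
//                                delegateData.m_Backends,
//                                isSupported,
//                                inputTensorInfo0,
//                                inputTensorInfo1,
//                                outputTensorInfo);
//     if (!isSupported)
//     {
//         return kTfLiteError;
//     }
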
TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    auto tensorAllocationType = tfLiteTensor.allocation_type;
    if (tensorAllocationType == kTfLiteDynamic)
    {
        return true;
    }
    return false;
}

bool IsValid(const TfLiteTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
{
    if (!IsValid(&tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    return true;
}

uint32_t NonNegative(int32_t value, int nodeIndex)
{
    if (value < 0)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
    }
    else
    {
        return static_cast<uint32_t>(value);
    }
}

bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        return true;
    }
    return false;
}

TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());

    // Connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

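// BroadcastTensor aligns the ranks of two inputs by reshaping the lower-rank input so that it
// gains leading dimensions of 1. For example (illustrative only): broadcasting a [4] tensor
// against a [2,3,4] tensor inserts a Reshape layer that turns the [4] input into [1,1,4] before
// connecting it to the elementwise layer.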
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    armnn::ReshapeDescriptor reshapeDescriptor;
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}

TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if TF_MAJOR_VERSION > 2 || (TF_MAJOR_VERSION == 2 && TF_MINOR_VERSION > 3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

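// Maps a TfLite tensor type to the equivalent Arm NN data type. kTfLiteInt8 needs extra care:
// with affine quantization and a single zero point it maps to QAsymmS8, while a per-channel
// zero-point array (or a missing one) maps to QSymmS8; without affine quantization it defaults
// to QAsymmS8.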
armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
{
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = tfLiteTensor.quantization;
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(tfLiteTensor.type));
    }
}

armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        if (tflite::IsConstantTensor(&tfLiteTensor))
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };
            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }
        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
                                       tensorDims.data(),
                                       dimensionsSpecificity);
        ret = armnn::TensorInfo(tensorShape, type);
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // Get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = tfLiteTensor.params;
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

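// Illustrative sketch (not code from this file) of how CreateConstTensor below can be used to
// permute a constant NHWC tensor into NCHW order; the tensor names and buffer management here
// are assumptions made for the example:
//
//     armnn::PermutationVector permutationVector{ 0, 2, 3, 1 }; // NHWC -> NCHW
//     std::vector<uint8_t> permutedData(weightsTensorInfo.GetNumBytes());
//     armnn::ConstTensor weights = CreateConstTensor(&tfLiteWeightsTensor,
//                                                    weightsTensorInfo,
//                                                    armnn::Optional<armnn::PermutationVector&>(permutationVector),
//                                                    permutedData.data());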
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     armnn::TensorInfo& tensorInfo,
                                     armnn::Optional<armnn::PermutationVector&>
                                         permutationVector = armnn::EmptyOptional(),
                                     void* permutationData = nullptr)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
    }

    if (tflite::IsConstantTensor(tfLiteTensor))
    {
        tensorInfo.SetConstant();
    }

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
    {
        // Permute the tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        // ... then permute the data using the shape from the permuted tensor info
        armnnUtils::Permute(tensorInfo.GetShape(),
                            permutationVector.value(),
                            tfLiteTensor->data.data,
                            permutationData,
                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));

        return armnn::ConstTensor(tensorInfo, permutationData);
    }
    else
    {
        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
    }
}

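// Note: the returned ConstTensor is allocated with new; the caller takes ownership and is
// responsible for deleting it.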
armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor& tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}

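// Worked example for the SAME-padding arithmetic below: inputSize = 224, filterSize = 3,
// stride = 2, dilation = 1 gives outputSize = 112, dilatedSize = 3 and temp = 225, so
// paddingFront = 0 and paddingBack = 1.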
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 TfLitePadding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == kTfLitePaddingSame)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    IgnoreUnused(layer);
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}

bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
{
    // In TfLite an omitted optional input is marked with a tensor index of -1
    if (tfLiteNode->inputs->data[operandIndex] < 0)
    {
        return false;
    }
    return true;
}

TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If an input tensor is a constant tensor, create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

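// Wraps a possibly negative axis index into the range [0, numDimensions). For example, with
// numDimensions = 4 an index of -1 resolves to 3 (the last dimension).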
unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
{
    int numDims = armnn::numeric_cast<int>(numDimensions);
    int wrappedIndex = index < 0 ? numDims + index : index;
    ARMNN_ASSERT(wrappedIndex >= 0);
    ARMNN_ASSERT(wrappedIndex < numDims);

    return static_cast<unsigned int>(wrappedIndex);
}

} // namespace anonymous