//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
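
// Example use of the macro above, mirroring the IsReshapeSupported query made in
// BroadcastTensor() further down (a sketch; inputInfo, outputInfo and descriptor are
// placeholders for the caller's own variables):
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                tfLiteContext,
//                                IsReshapeSupported,
//                                delegateData.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                descriptor);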

TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.allocation_type == kTfLiteDynamic;
}

bool IsValid(const TfLiteTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
{
    if (!IsValid(&tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    return true;
}

uint32_t NonNegative(int32_t value, int nodeIndex)
{
    if (value < 0)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Negative value in node " + std::to_string(nodeIndex));
    }
    return static_cast<uint32_t>(value);
}

bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.quantization.type == kTfLiteAffineQuantization;
}

TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());

    // Connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    armnn::ReshapeDescriptor reshapeDescriptor;
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}
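
// Worked example for BroadcastTensor(): adding a [4] tensor to a [2,3,4] tensor.
// dimDifference is 2, so the smaller shape is left-padded with 1s to [1,1,4] and a
// Reshape layer is inserted in front of the corresponding input of startLayer; both
// inputs then have equal rank and the backend can broadcast them element-wise.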

TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
{
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = tfLiteTensor.quantization;
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(static_cast<int>(tfLiteTensor.type)));
    }
}
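
// Mapping example for GetDataType(): an int8 tensor with a single affine zero point
// (per-tensor quantization) maps to QAsymmS8, while int8 weights carrying several
// zero points (per-channel quantization) map to QSymmS8.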

armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        if (tflite::IsConstantTensor(&tfLiteTensor))
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };
            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }
        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
                                       tensorDims.data(),
                                       dimensionsSpecificity);
        ret = armnn::TensorInfo(tensorShape, type);
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = tfLiteTensor.params;
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}
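
// Example for GetTensorInfoForTfLiteTensor(): per-channel-quantized convolution weights
// with four output channels arrive with affineQuantization->scale->size == 4, so the
// info is built with SetQuantizationScales over all four scales and quantized_dimension
// as the quantization dim; a per-tensor tensor (scale->size == 1) gets one scale/offset.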

armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     armnn::TensorInfo& tensorInfo,
                                     armnn::Optional<armnn::PermutationVector&>
                                         permutationVector = armnn::EmptyOptional(),
                                     void* permutationData = nullptr)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
    }

    if (tflite::IsConstantTensor(tfLiteTensor))
    {
        tensorInfo.SetConstant();
    }

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
    {
        // Permute tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        // then permute data using the shape from permuted tensor info
        armnnUtils::Permute(tensorInfo.GetShape(),
                            permutationVector.value(),
                            tfLiteTensor->data.data,
                            permutationData,
                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));

        return armnn::ConstTensor(tensorInfo, permutationData);
    }
    else
    {
        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
    }
}
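
// Typical call, as made in ConnectConstant() and ProcessInputs() below: no permutation
// is required, so only the tensor and its TensorInfo are passed:
//
//     auto constantInput = CreateConstTensor(&tfLiteTensor,
//                                            constTensorInfo,
//                                            armnn::Optional<armnn::PermutationVector&>());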

armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor& tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}

void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 TfLitePadding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == kTfLitePaddingSame)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}
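
// Worked example for CalcPadding() with SAME padding: inputSize = 5, filterSize = 3,
// stride = 2, dilation = 1 gives outputSize = 3, dilatedSize = 3 and temp = 7, so the
// 2 missing elements are split as paddingFront = 1, paddingBack = 1.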

TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    armnn::IgnoreUnused(layer);
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}

bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
{
    // TfLite marks an omitted optional operand with a negative tensor index.
    // Note: despite its name, this helper returns true when the operand is omitted.
    if (tfLiteNode->inputs->data[operandIndex] < 0)
    {
        return true;
    }
    return false;
}

TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If input tensor is a Constant tensor create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
{
    int numDims = armnn::numeric_cast<int>(numDimensions);
    int wrappedIndex = index < 0 ? numDims + index : index;
    ARMNN_ASSERT(wrappedIndex >= 0);
    ARMNN_ASSERT(wrappedIndex < numDims);

    return static_cast<unsigned int>(wrappedIndex);
}
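
// Example for ComputeWrappedIndex(): with numDimensions = 4, an axis of -1 wraps to 3
// (the last dimension) and an axis of 2 is returned unchanged.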

bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
                    const armnn::TensorInfo& inputInfo2,
                    const armnn::TensorInfo& outputInfo)
{
    return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
           (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
           (armnn::DataType::Signed32 == outputInfo.GetDataType());
}

} // namespace anonymous