//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn_delegate.hpp>

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(opName, tfLiteContext, func, backends, supported, setBackend, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                setBackend = backendId; \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", opName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", opName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", opName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", opName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
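// Typical usage, mirroring the calls made later in this file (the tensor infos and descriptor are illustrative):
//
//     bool isSupported = false;
//     armnn::BackendId setBackend;
//     FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
//                                tfLiteContext,
//                                IsActivationSupported,
//                                delegateData.m_Backends,
//                                isSupported,
//                                setBackend,
//                                inputTensorInfo,
//                                outputTensorInfo,
//                                activationDescriptor);
//     if (!isSupported)
//     {
//         return kTfLiteError;
//     }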

TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (static_cast<unsigned int>(numInputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (static_cast<unsigned int>(numOutputs) != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

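// Returns true if the TfLite tensor is dynamic, i.e. its allocation type is kTfLiteDynamic.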
bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    auto tensorAllocationType = tfLiteTensor.allocation_type;
    if (tensorAllocationType == kTfLiteDynamic)
    {
        return true;
    }
    return false;
}

bool IsValid(const TfLiteTensor* tfLiteTensor)
{
    return tfLiteTensor == nullptr ? false : true;
}

bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
{
    if (!IsValid(&tfLiteTensor))
    {
        std::cout << "..Is Not Valid" << std::endl;
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    if (IsDynamicTensor(tfLiteTensor))
    {
        std::cout << "..IsDynamicTensor" << std::endl;
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return false;
    }
    return true;
}

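// Casts a value that is expected to be non-negative to uint32_t, throwing an armnn::Exception if it is negative.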
uint32_t NonNegative(int32_t value, int nodeIndex)
{
    if (value < 0)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Negative value passed in node " + std::to_string(static_cast<int>(nodeIndex)));
    }
    else
    {
        return static_cast<uint32_t>(value);
    }
}

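// Returns true if the tensor carries TfLite affine quantization parameters.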
bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        return true;
    }
    return false;
}

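// Wires up a newly added layer: connects any output slots already registered for the node's input tensors to the
// layer's input slots, and records the layer's output slots against the node's output tensor indices.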
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());

    // Connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return kTfLiteOk;
}

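// Helper for binary elementwise layers whose inputs have different ranks: the lower-rank input is reshaped to the
// higher rank (leading dimensions filled with 1) via an inserted Reshape layer so ArmNN can broadcast it. Returns
// startLayer when the ranks already match, the inserted Reshape layer otherwise, or nullptr if the reshape or the
// connection is not supported.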
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               setBackend,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    reshapeLayer->SetBackendId(setBackend);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}

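// Adds an ArmNN Activation layer after prevLayer to realise a TfLite fused activation (RELU, RELU_N1_TO_1, RELU6,
// SIGMOID or TANH) and rewires the node's output slots through it. Returns kTfLiteOk without adding a layer for
// kTfLiteActNone, and kTfLiteError if the activation is not supported.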
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
    activationLayer->SetBackendId(setBackend);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

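// Maps a TfLite tensor type to the corresponding armnn::DataType. For kTfLiteInt8 the quantization parameters are
// inspected to distinguish asymmetric (QAsymmS8) from symmetric per-channel (QSymmS8) quantization.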
armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
{
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
        {
            auto quantizationInfo = tfLiteTensor.quantization;
            if (quantizationInfo.type == kTfLiteAffineQuantization)
            {
                auto* quantization =
                    reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
                if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
                {
                    return armnn::DataType::QAsymmS8;
                }
                else
                {
                    return armnn::DataType::QSymmS8;
                }
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        case kTfLiteInt64:
            return armnn::DataType::Signed64;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(static_cast<int>(tfLiteTensor.type)));
    }
}

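// Builds an armnn::TensorInfo from a TfLite tensor: shape (including unspecified and scalar cases), data type,
// constness, and either per-tensor or per-channel quantization parameters.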
armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor, bool isOutput = false)
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        // If the input tensor does not have a shape, assume it is a 1D tensor
        if (!isOutput)
        {
            std::vector<unsigned int> safeShape = { 1 };
            bool dimensionsSpecificity[1] = { true };
            armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
                                           safeShape.data(),
                                           dimensionsSpecificity);
            ret = armnn::TensorInfo(tensorShape, type);
            if (tflite::IsConstantTensor(&tfLiteTensor))
            {
                ret.SetConstant(true);
            }
        }
        else
        {
            armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }
    else
    {
        std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = static_cast<unsigned int>(dim);
        }
        armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
                                       tensorDims.data(),
                                       dimensionsSpecificity);

        if (tflite::IsConstantTensor(&tfLiteTensor))
        {
            ret = armnn::TensorInfo(tensorShape, type);
            ret.SetConstant(true);
        }
        else
        {
            ret = armnn::TensorInfo(tensorShape, type);
        }
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        if (affineQuantization->scale->size > 1)
        {
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(armnn::numeric_cast<unsigned int>(affineQuantization->quantized_dimension));
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = tfLiteTensor.params;
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

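// Creates an armnn::ConstTensor from a constant (kTfLiteMmapRo) TfLite tensor. If a permutation vector and a
// destination buffer are supplied, the tensor info and data are permuted into that buffer first.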
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     armnn::TensorInfo& tensorInfo,
                                     armnn::Optional<armnn::PermutationVector&>
                                         permutationVector = armnn::EmptyOptional(),
                                     void* permutationData = nullptr)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception(
            "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
    }

    if (tflite::IsConstantTensor(tfLiteTensor))
    {
        tensorInfo.SetConstant();
    }

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
    {
        // Permute tensor info
        tensorInfo = armnnUtils::Permuted(tensorInfo, permutationVector.value());
        // then permute data using the shape from permuted tensor info
        armnnUtils::Permute(tensorInfo.GetShape(),
                            permutationVector.value(),
                            tfLiteTensor->data.data,
                            permutationData,
                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));

        return armnn::ConstTensor(tensorInfo, permutationData);
    }
    else
    {
        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
    }
}

armnn::ConstTensor* GetConstTensorForTfLiteTensor(const TfLiteTensor* tfLiteTensors, TfLiteNode* tfLiteNode, int index)
{
    const TfLiteTensor& tfLiteTensor = tfLiteTensors[tfLiteNode->inputs->data[index]];
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteTensor);
    return new armnn::ConstTensor(tensorInfo, tfLiteTensor.data.data);
}

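// Computes the front/back padding for TfLite SAME padding (both remain 0 for VALID), taking stride and dilation into
// account. For example, inputSize=5, filterSize=3, stride=2, dilation=1 gives paddingFront=1 and paddingBack=1.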
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 TfLitePadding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == kTfLitePaddingSame)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

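// Adds a Constant layer for the given constant TfLite tensor, provided one of the backends supports it, and registers
// the layer's output slot at slotIndex.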
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    IgnoreUnused(layer);
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    constantLayer->SetBackendId(setBackend);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}

bool IsOptionalOperandPresent(TfLiteNode* tfLiteNode, const int operandIndex)
{
    // If the inputs array has fewer than operandIndex entries, or if the entry at operandIndex has a value of -1 or
    // less, then the input is not present.
    if (tfLiteNode->inputs->size > operandIndex && tfLiteNode->inputs->data[operandIndex] >= 0)
    {
        return true;
    }
    return false;
}

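// For every constant input tensor of the node, adds a Constant layer (subject to backend support) and registers its
// output slot so Connect() can later wire it to the consuming layer.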
TfLiteStatus ProcessInputs(armnn::IConnectableLayer* layer,
                           armnnDelegate::DelegateData& delegateData,
                           TfLiteContext* tfLiteContext,
                           TfLiteNode* tfLiteNode)
{
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    // Process input tensors
    // If an input tensor is a constant tensor, create a constant layer and connect it to the network
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[inputIndex]];
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
            bool isSupported = false;
            armnn::BackendId setBackend;
            FORWARD_LAYER_SUPPORT_FUNC("CONSTANT",
                                       tfLiteContext,
                                       IsConstantSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputTensorInfo);
            if (!isSupported)
            {
                return kTfLiteError;
            }
            auto constantInput = CreateConstTensor(&tfLiteInputTensor,
                                                   inputTensorInfo,
                                                   armnn::Optional<armnn::PermutationVector&>());
            armnn::IConnectableLayer* constantLayer = delegateData.m_Network->AddConstantLayer(constantInput);
            constantLayer->SetBackendId(setBackend);
            armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(inputTensorInfo);

            delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] = &outputSlot;
        }
    }
    return kTfLiteOk;
}

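// Converts a possibly negative axis index into its positive equivalent in the range [0, numDimensions).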
unsigned int ComputeWrappedIndex(int index, unsigned int numDimensions)
{
    int numDims = armnn::numeric_cast<int>(numDimensions);
    int wrappedIndex = index < 0 ? numDims + index : index;
    ARMNN_ASSERT(wrappedIndex >= 0);
    ARMNN_ASSERT(wrappedIndex < numDims);

    return static_cast<unsigned int>(wrappedIndex);
}

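// Returns true only if both inputs and the output are of type Signed32.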
bool AreAllSigned32(const armnn::TensorInfo& inputInfo1,
                    const armnn::TensorInfo& inputInfo2,
                    const armnn::TensorInfo& outputInfo)
{
    return (armnn::DataType::Signed32 == inputInfo1.GetDataType()) &&
           (armnn::DataType::Signed32 == inputInfo2.GetDataType()) &&
           (armnn::DataType::Signed32 == outputInfo.GetDataType());
}

void UpdateConstantTensorOutputs(const armnn::TensorInfo& inputInfo, armnn::TensorInfo& outputInfo)
{
    // If the input tensor info is constant and the output tensor info shape is not specified,
    // set the output shape from the input shape
    if (inputInfo.IsConstant() && outputInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        outputInfo.SetShape(inputInfo.GetShape());
    }
    return;
}

} // namespace anonymous