blob: 990f2107349dcd0c95786fbf62726c009c917914 [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
21
Sadik Armagan62483be2020-10-23 17:14:43 +010022namespace
23{
24
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support.
//
// Parameters:
//   funcName      - caller name (typically __func__) used in log messages
//   tfLiteContext - TfLiteContext* used for TF_LITE_KERNEL_LOG reporting
//   func          - name of the ILayerSupport member function to invoke (e.g. IsReshapeSupported)
//   backends      - container of armnn::BackendId to query, in priority order
//   supported     - bool lvalue that receives the result; set true by the first backend
//                   that supports the layer (iteration stops there), false otherwise
//   ...           - arguments forwarded to the support function, ahead of the trailing
//                   Optional<std::string&> reason parameter
//
// Any InvalidArgumentException thrown by a support check is re-thrown with caller
// location information attached via CHECK_LOCATION().
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TF_LITE_KERNEL_LOG( \
                        tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
68
69TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
70 TfLiteNode* tfLiteNode,
71 const unsigned int expectedSize,
72 int nodeIndex)
73{
74 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000075 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010076 {
77 TF_LITE_MAYBE_KERNEL_LOG(
78 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
79 numInputs, expectedSize, nodeIndex);
80 return kTfLiteError;
81 }
82 return kTfLiteOk;
83}
84
85TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
86 TfLiteNode* tfLiteNode,
87 const unsigned int expectedSize,
88 int nodeIndex)
89{
90 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000091 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010092 {
93 TF_LITE_MAYBE_KERNEL_LOG(
94 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
95 numOutputs, expectedSize, nodeIndex);
96 return kTfLiteError;
97 }
98 return kTfLiteOk;
99}
100
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000101bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
102{
103 auto tensorAllocationType = tfLiteTensor.allocation_type;
104 if (tensorAllocationType == kTfLiteDynamic)
105 {
106 return true;
107 }
108 return false;
109}
110
Sadik Armagan6e36a642020-11-10 21:18:41 +0000111bool IsValid(const TfLiteTensor* tfLiteTensor)
112{
113 return tfLiteTensor == nullptr ? false : true;
114}
115
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000116bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
117{
118 if(!IsValid(&tfLiteTensor))
119 {
120 std::cout << "..Is Not Valid" << std::endl;
121 TF_LITE_MAYBE_KERNEL_LOG(
122 tfLiteContext,
123 "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
124 operatorCode, nodeIndex);
125 return false;
126 }
127 if (IsDynamicTensor(tfLiteTensor))
128 {
129 std::cout << "..IsDynamicTensor" << std::endl;
130 TF_LITE_MAYBE_KERNEL_LOG(
131 tfLiteContext,
132 "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
133 operatorCode, nodeIndex);
134 return false;
135 }
136 return true;
137}
138
Sadik Armagan32ca1442020-11-13 17:51:56 +0000139uint32_t NonNegative(int32_t value, int nodeIndex)
140{
141 if (value < 0)
142 {
Keith Davis892fafe2020-11-26 17:40:35 +0000143 throw armnn::Exception(
144 "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000145 }
146 else
147 {
148 return static_cast<uint32_t>(value);
149 }
150}
151
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000152bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
153{
154 auto quantizationInfo = tfLiteTensor.quantization;
155 if (quantizationInfo.type == kTfLiteAffineQuantization)
156 {
157 return true;
158 }
159 return false;
160}
161
Sadik Armagan67e95f22020-10-29 16:14:54 +0000162TfLiteStatus Connect(armnn::IConnectableLayer* layer,
163 TfLiteNode* tfLiteNode,
164 armnnDelegate::DelegateData& data)
165{
Keith Davis892fafe2020-11-26 17:40:35 +0000166 ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000167
168 // Connect the input slots
169 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
170 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000171 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
172 {
173 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
174 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000175 }
176
177 // Prepare output slots
178 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
179 {
180 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000181 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000182 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000183
Sadik Armagan67e95f22020-10-29 16:14:54 +0000184 return kTfLiteOk;
185}
186
/// Prepares a binary elementwise layer for inputs of unequal rank by inserting
/// a Reshape layer that pads the smaller input's shape with leading 1s
/// (NumPy-style broadcast alignment).
///
/// @return startLayer when ranks already match and connection succeeds;
///         the inserted reshape layer when a reshape was added;
///         nullptr when connection or the reshape support check fails.
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    // Equal ranks: no reshape needed, connect directly.
    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    // Build the broadcast-compatible shape: leading dims are 1, trailing dims
    // copy the smaller input's shape.
    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // Ask the backends whether the reshape is supported before mutating the graph.
    armnn::ReshapeDescriptor reshapeDescriptor;
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Route the smaller input through the reshape layer; the larger input
    // connects straight to the corresponding startLayer input slot.
    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    // NOTE(review): on success with unequal ranks this returns the reshape
    // layer rather than startLayer — confirm callers only null-check the result.
    return reshapeLayer;
}
271
/// Appends an Activation layer implementing the TfLite fused-activation code
/// (kTfLiteActNone/Relu/Relu1/Relu6/Sigmoid/Tanh) after @p prevLayer, and
/// rewires the node's output-slot registrations so downstream consumers see
/// the activation's output instead of @p prevLayer's.
///
/// @return kTfLiteOk on success or when no activation is required;
///         kTfLiteError for unsupported activation types or failed support checks.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    // Map the TfLite fused-activation enum onto an Arm NN activation function.
    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActRelu1:
        {
            // ReLU1 clamps to [-1, 1]: BoundedReLu with upper bound A and lower bound B.
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            // ReLU6 clamps to [0, 6].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    // NOTE(review): the input info here is taken from slot 0 while the output
    // info above uses outputSlotIndex — confirm this is intentional for
    // multi-output previous layers.
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots: first connect the slot currently
    // registered for the node's output (prevLayer's) into the activation,
    // then re-register the map entry to point at the activation's output.
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
355
Sadik Armagan6e36a642020-11-10 21:18:41 +0000356armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100357{
Sadik Armagan62483be2020-10-23 17:14:43 +0100358 switch (tfLiteTensor.type)
359 {
360 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000361 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100362 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000363 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100364 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000365 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100366 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000367 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100368 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000369 {
370 auto quantizationInfo = tfLiteTensor.quantization;
371 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000372 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000373 auto* quantization =
374 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
375 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
376 {
377 return armnn::DataType::QAsymmS8;
378 }
379 else
380 {
381 return armnn::DataType::QSymmS8;
382 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000383 }
384 else
385 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000386 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000387 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000388 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100389 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000390 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100391 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000392 return armnn::DataType::Signed32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100393 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000394 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100395 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000396}
Sadik Armagan62483be2020-10-23 17:14:43 +0100397
Sadik Armagan32ca1442020-11-13 17:51:56 +0000398armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
399 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Sadik Armagan6e36a642020-11-10 21:18:41 +0000400{
401 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100402 armnn::TensorInfo ret;
403 auto tensorDimensionSize = tfLiteTensor.dims->size;
404 if (tensorDimensionSize == 0)
405 {
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000406 if(tflite::IsConstantTensor(&tfLiteTensor))
407 {
408 std::vector<unsigned int> safeShape = { 1 };
409 bool dimensionsSpecificity[1] = { true };
410 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
411 safeShape.data(),
412 dimensionsSpecificity);
413 ret = armnn::TensorInfo(tensorShape, type);
414 }
415 else
416 {
417 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
418 ret = armnn::TensorInfo(tensorShape, type);
419 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100420 }
421 else
422 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000423 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100424 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000425 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100426 auto dim = tfLiteTensor.dims->data[i];
427 if (dim == 0)
428 {
429 dimensionsSpecificity[i] = false;
430 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000431 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100432 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000433 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
434 tensorDims.data(),
435 dimensionsSpecificity);
Sadik Armagan62483be2020-10-23 17:14:43 +0100436 ret = armnn::TensorInfo(tensorShape, type);
437 }
438
439 auto quantizationInfo = tfLiteTensor.quantization;
440 if (quantizationInfo.type == kTfLiteAffineQuantization)
441 {
442 // get per-channel quantization parameters
443 const auto* affineQuantization =
444 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000445 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100446 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000447 std::vector<float> quantizationScales;
Finn Williams6f9f9902020-11-13 13:23:15 +0000448 for (unsigned int i = 1; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000449 {
450 quantizationScales.push_back(affineQuantization->scale->data[i]);
451 }
452 ret.SetQuantizationScales(quantizationScales);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000453 ret.SetQuantizationDim(dimensionMappings[armnn::numeric_cast<unsigned int>(
454 affineQuantization->quantized_dimension)]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100455 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000456 else
457 {
458 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
459 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
460 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100461 }
462 else
463 {
464 auto quantizationParameters = tfLiteTensor.params;
465 ret.SetQuantizationScale(quantizationParameters.scale);
466 ret.SetQuantizationOffset(quantizationParameters.zero_point);
467 }
468
469 return ret;
470}
471
Sadik Armagan4189cc52020-11-11 18:01:48 +0000472armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
473 armnn::TensorInfo& tensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000474 armnn::Optional<armnn::PermutationVector&> permutationVector,
475 void* permutationData = nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000476{
Sadik Armagan4189cc52020-11-11 18:01:48 +0000477 if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
478 {
Keith Davis892fafe2020-11-26 17:40:35 +0000479 throw armnn::Exception(
480 "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
Sadik Armagan4189cc52020-11-11 18:01:48 +0000481 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000482
Sadik Armagan32ca1442020-11-13 17:51:56 +0000483 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000484 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000485 armnnUtils::Permute(armnnUtils::Permuted(tensorInfo.GetShape(), permutationVector.value()),
Sadik Armagan6e36a642020-11-10 21:18:41 +0000486 permutationVector.value(),
Sadik Armagan4189cc52020-11-11 18:01:48 +0000487 tfLiteTensor->data.data,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000488 permutationData,
Sadik Armagan4189cc52020-11-11 18:01:48 +0000489 armnn::GetDataTypeSize(tensorInfo.GetDataType()));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000490
491 return armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, permutationVector.value()), permutationData);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000492 }
493 else
494 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000495 return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000496 }
497}
498
Sadik Armagan32ca1442020-11-13 17:51:56 +0000499void CalcPadding(uint32_t inputSize,
500 uint32_t filterSize,
501 uint32_t stride,
502 uint32_t dilation,
503 uint32_t& paddingFront,
504 uint32_t& paddingBack,
505 TfLitePadding padding)
506{
507 paddingFront = 0;
508 paddingBack = 0;
509 if (padding == kTfLitePaddingSame)
510 {
511 uint32_t outputSize = (inputSize + stride - 1) / stride;
512 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
513 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
514 if (temp > inputSize)
515 {
516 paddingFront = (temp - inputSize) / 2;
517 paddingBack = (temp - inputSize) - paddingFront;
518 }
519 }
520}
521
/// Adds a Constant layer holding @p tfLiteTensor's data to the network and
/// registers its output slot under @p slotIndex so later nodes can consume it.
///
/// @return kTfLiteOk on success, kTfLiteError if no backend supports the
///         constant (or CreateConstTensor throws for non-constant tensors).
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    // The layer parameter is unused; kept for interface symmetry with other
    // Connect* helpers. NOTE(review): IgnoreUnused is used unqualified —
    // presumably armnn::IgnoreUnused pulled in transitively; confirm include.
    IgnoreUnused(layer);
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    // No permutation requested: the const tensor views the TfLite data directly.
    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000553
Sadik Armagan62483be2020-10-23 17:14:43 +0100554} // namespace anonymous