blob: 1e5782ec424acda11fba74687fcfdd037e11b1b5 [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
21
Sadik Armagan62483be2020-10-23 17:14:43 +010022namespace
23{
24
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support.
//
// Usage: FORWARD_LAYER_SUPPORT_FUNC(__func__, tfLiteContext, IsXxxSupported, backends, supported, <IsXxxSupported args...>)
//
// Iterates over `backends` in order and asks each registered backend's ILayerSupport
// object whether the layer is supported, writing the result into `supported`.
// Stops at the first backend that reports support. Unsupported/unregistered backends
// are logged (TFLITE_LOG_PROD warning / TF_LITE_KERNEL_LOG respectively) but do not
// abort the loop. An InvalidArgumentException from a support check is rethrown with
// added context. NOTE: comments cannot be placed inside the macro body — a `//` before
// the line-continuation backslash would splice the next line into the comment.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject.IsBackendRegistered()) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TFLITE_LOG_PROD(tflite::TFLITE_LOG_WARNING, \
                                    "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
69
70TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
71 TfLiteNode* tfLiteNode,
72 const unsigned int expectedSize,
73 int nodeIndex)
74{
75 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000076 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010077 {
78 TF_LITE_MAYBE_KERNEL_LOG(
79 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
80 numInputs, expectedSize, nodeIndex);
81 return kTfLiteError;
82 }
83 return kTfLiteOk;
84}
85
86TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
87 TfLiteNode* tfLiteNode,
88 const unsigned int expectedSize,
89 int nodeIndex)
90{
91 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000092 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010093 {
94 TF_LITE_MAYBE_KERNEL_LOG(
95 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
96 numOutputs, expectedSize, nodeIndex);
97 return kTfLiteError;
98 }
99 return kTfLiteOk;
100}
101
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000102bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
103{
104 auto tensorAllocationType = tfLiteTensor.allocation_type;
105 if (tensorAllocationType == kTfLiteDynamic)
106 {
107 return true;
108 }
109 return false;
110}
111
Sadik Armagan6e36a642020-11-10 21:18:41 +0000112bool IsValid(const TfLiteTensor* tfLiteTensor)
113{
114 return tfLiteTensor == nullptr ? false : true;
115}
116
Sadik Armagan34fa1bd2020-11-27 12:40:52 +0000117bool IsValid(TfLiteContext* tfLiteContext, const TfLiteTensor& tfLiteTensor, int32_t operatorCode, int32_t nodeIndex)
118{
119 if(!IsValid(&tfLiteTensor))
120 {
121 std::cout << "..Is Not Valid" << std::endl;
122 TF_LITE_MAYBE_KERNEL_LOG(
123 tfLiteContext,
124 "TfLiteArmnnDelegate: Invalid TfLite tensor in operator #%d node #%d: ",
125 operatorCode, nodeIndex);
126 return false;
127 }
128 if (IsDynamicTensor(tfLiteTensor))
129 {
130 std::cout << "..IsDynamicTensor" << std::endl;
131 TF_LITE_MAYBE_KERNEL_LOG(
132 tfLiteContext,
133 "TfLiteArmnnDelegate: Dynamic tensors are not supported in operator #%d node #%d: ",
134 operatorCode, nodeIndex);
135 return false;
136 }
137 return true;
138}
139
Sadik Armagan32ca1442020-11-13 17:51:56 +0000140uint32_t NonNegative(int32_t value, int nodeIndex)
141{
142 if (value < 0)
143 {
Keith Davis892fafe2020-11-26 17:40:35 +0000144 throw armnn::Exception(
145 "TfLiteArmnnDelegate: Non-negative value in node " + std::to_string(static_cast<int>(nodeIndex)));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000146 }
147 else
148 {
149 return static_cast<uint32_t>(value);
150 }
151}
152
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000153bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
154{
155 auto quantizationInfo = tfLiteTensor.quantization;
156 if (quantizationInfo.type == kTfLiteAffineQuantization)
157 {
158 return true;
159 }
160 return false;
161}
162
Sadik Armagan67e95f22020-10-29 16:14:54 +0000163TfLiteStatus Connect(armnn::IConnectableLayer* layer,
164 TfLiteNode* tfLiteNode,
165 armnnDelegate::DelegateData& data)
166{
Keith Davis892fafe2020-11-26 17:40:35 +0000167 ARMNN_ASSERT(static_cast<unsigned int>(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000168
169 // Connect the input slots
170 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
171 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000172 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
173 {
174 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
175 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000176 }
177
178 // Prepare output slots
179 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
180 {
181 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000182 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000183 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000184
Sadik Armagan67e95f22020-10-29 16:14:54 +0000185 return kTfLiteOk;
186}
187
// Handles NumPy-style broadcasting for a binary elementwise layer by
// rank-expanding the lower-rank input with a Reshape layer (prepending
// size-1 dimensions) before connecting both inputs to `startLayer`.
//
// Returns:
//   - startLayer  when both inputs already have equal rank (plain Connect),
//   - the added reshape layer on the broadcast path,
//   - nullptr when Connect fails or the backend rejects the Reshape.
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    // Equal ranks: no broadcast needed, connect directly.
    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    // Build the expanded shape: leading dims are 1, trailing dims copy the small shape.
    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // Ask the configured backends whether they support this Reshape.
    armnn::ReshapeDescriptor reshapeDescriptor;
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Route the smaller input through the reshape layer; the other input
    // connects straight to the corresponding slot of startLayer.
    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}
272
// Appends an Arm NN Activation layer implementing a TfLite fused activation
// (RELU, RELU1, RELU6, SIGMOID, TANH) after `prevLayer`, and rewires the
// node's output bookkeeping so downstream consumers see the activation's
// output instead of prevLayer's.
// Returns kTfLiteOk immediately for kTfLiteActNone; kTfLiteError for
// unsupported activation types or when no backend supports the activation.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    // The activation is elementwise, so its output info matches prevLayer's output.
    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActRelu1:
        {
            // RELU1 clamps to [-1, 1]: BoundedReLu with upper bound A and lower bound B.
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            // RELU6 clamps to [0, 6].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            // Standard tanh: scale (m_A) and steepness (m_B) both 1.
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        // The slot registered for this node's output is currently prevLayer's
        // output slot (set by Connect); feed it into the activation layer...
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        // ...then re-register the activation's own output slot in its place so
        // downstream nodes consume the activated values.
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
356
Sadik Armagan6e36a642020-11-10 21:18:41 +0000357armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100358{
Sadik Armagan62483be2020-10-23 17:14:43 +0100359 switch (tfLiteTensor.type)
360 {
361 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000362 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100363 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000364 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100365 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000366 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100367 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000368 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100369 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000370 {
371 auto quantizationInfo = tfLiteTensor.quantization;
372 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000373 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000374 auto* quantization =
375 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
376 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
377 {
378 return armnn::DataType::QAsymmS8;
379 }
380 else
381 {
382 return armnn::DataType::QSymmS8;
383 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000384 }
385 else
386 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000387 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000388 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000389 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100390 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000391 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100392 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000393 return armnn::DataType::Signed32;
Mike Kelly1f140f72021-04-06 12:25:55 +0100394 case kTfLiteInt64:
395 return armnn::DataType::Signed64;
Sadik Armagan62483be2020-10-23 17:14:43 +0100396 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000397 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100398 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000399}
Sadik Armagan62483be2020-10-23 17:14:43 +0100400
Sadik Armagan32ca1442020-11-13 17:51:56 +0000401armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
402 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Sadik Armagan6e36a642020-11-10 21:18:41 +0000403{
404 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100405 armnn::TensorInfo ret;
406 auto tensorDimensionSize = tfLiteTensor.dims->size;
407 if (tensorDimensionSize == 0)
408 {
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000409 if(tflite::IsConstantTensor(&tfLiteTensor))
410 {
411 std::vector<unsigned int> safeShape = { 1 };
412 bool dimensionsSpecificity[1] = { true };
413 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
414 safeShape.data(),
415 dimensionsSpecificity);
416 ret = armnn::TensorInfo(tensorShape, type);
417 }
418 else
419 {
420 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
421 ret = armnn::TensorInfo(tensorShape, type);
422 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100423 }
424 else
425 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000426 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100427 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000428 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100429 auto dim = tfLiteTensor.dims->data[i];
430 if (dim == 0)
431 {
432 dimensionsSpecificity[i] = false;
433 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000434 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100435 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000436 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
437 tensorDims.data(),
438 dimensionsSpecificity);
Sadik Armagan62483be2020-10-23 17:14:43 +0100439 ret = armnn::TensorInfo(tensorShape, type);
440 }
441
442 auto quantizationInfo = tfLiteTensor.quantization;
443 if (quantizationInfo.type == kTfLiteAffineQuantization)
444 {
445 // get per-channel quantization parameters
446 const auto* affineQuantization =
447 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000448 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100449 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000450 std::vector<float> quantizationScales;
Finn Williamsf806c4d2021-02-22 15:13:12 +0000451 for (unsigned int i = 0; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000452 {
453 quantizationScales.push_back(affineQuantization->scale->data[i]);
454 }
455 ret.SetQuantizationScales(quantizationScales);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000456 ret.SetQuantizationDim(dimensionMappings[armnn::numeric_cast<unsigned int>(
457 affineQuantization->quantized_dimension)]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100458 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000459 else
460 {
461 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
462 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
463 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100464 }
465 else
466 {
467 auto quantizationParameters = tfLiteTensor.params;
468 ret.SetQuantizationScale(quantizationParameters.scale);
469 ret.SetQuantizationOffset(quantizationParameters.zero_point);
470 }
471
472 return ret;
473}
474
Sadik Armagan4189cc52020-11-11 18:01:48 +0000475armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
476 armnn::TensorInfo& tensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000477 armnn::Optional<armnn::PermutationVector&> permutationVector,
478 void* permutationData = nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000479{
Sadik Armagan4189cc52020-11-11 18:01:48 +0000480 if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
481 {
Keith Davis892fafe2020-11-26 17:40:35 +0000482 throw armnn::Exception(
483 "TfLiteArmnnDelegate: Not constant allocation type: " + std::to_string(tfLiteTensor->allocation_type));
Sadik Armagan4189cc52020-11-11 18:01:48 +0000484 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000485
Sadik Armagan32ca1442020-11-13 17:51:56 +0000486 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000487 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000488 armnnUtils::Permute(armnnUtils::Permuted(tensorInfo.GetShape(), permutationVector.value()),
Sadik Armagan6e36a642020-11-10 21:18:41 +0000489 permutationVector.value(),
Sadik Armagan4189cc52020-11-11 18:01:48 +0000490 tfLiteTensor->data.data,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000491 permutationData,
Sadik Armagan4189cc52020-11-11 18:01:48 +0000492 armnn::GetDataTypeSize(tensorInfo.GetDataType()));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000493
494 return armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, permutationVector.value()), permutationData);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000495 }
496 else
497 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000498 return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000499 }
500}
501
Sadik Armagan32ca1442020-11-13 17:51:56 +0000502void CalcPadding(uint32_t inputSize,
503 uint32_t filterSize,
504 uint32_t stride,
505 uint32_t dilation,
506 uint32_t& paddingFront,
507 uint32_t& paddingBack,
508 TfLitePadding padding)
509{
510 paddingFront = 0;
511 paddingBack = 0;
512 if (padding == kTfLitePaddingSame)
513 {
514 uint32_t outputSize = (inputSize + stride - 1) / stride;
515 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
516 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
517 if (temp > inputSize)
518 {
519 paddingFront = (temp - inputSize) / 2;
520 paddingBack = (temp - inputSize) - paddingFront;
521 }
522 }
523}
524
// Adds a Constant layer for a TfLite constant tensor and registers its output
// slot under `slotIndex` in data.m_OutputSlotForNode, so the consuming layer
// can pick it up via Connect. Returns kTfLiteError when no configured backend
// supports a Constant layer with this tensor info.
//
// NOTE(review): `layer` is accepted but deliberately unused (IgnoreUnused) —
// presumably kept for signature symmetry with other Connect* helpers; confirm
// against callers before removing it.
TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
                             armnn::TensorInfo& constTensorInfo,
                             TfLiteContext* tfLiteContext,
                             const TfLiteTensor& tfLiteTensor,
                             armnnDelegate::DelegateData& data,
                             unsigned int slotIndex)
{
    IgnoreUnused(layer);
    // Check backend support for the Constant layer before adding it.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsConstantSupported,
                               data.m_Backends,
                               isSupported,
                               constTensorInfo);
    if (!isSupported)
    {
        return kTfLiteError;
    }

    // No permutation requested: the ConstTensor aliases the TfLite buffer.
    auto constantInput = CreateConstTensor(&tfLiteTensor,
                                           constTensorInfo,
                                           armnn::Optional<armnn::PermutationVector&>());
    armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(constTensorInfo);

    // Register the constant's output so the consumer node can connect to it.
    data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;

    return kTfLiteOk;
}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000556
Sadik Armagan62483be2020-10-23 17:14:43 +0100557} // namespace anonymous