//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <cstdlib>
#include <string>
#include <vector>

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TF_LITE_KERNEL_LOG( \
                        tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException& e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
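
// Example use of FORWARD_LAYER_SUPPORT_FUNC (a sketch mirroring the IsReshapeSupported call
// further down in this file; the tensor infos and descriptor are illustrative):
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                tfLiteContext,
//                                IsReshapeSupported,
//                                delegateData.m_Backends,
//                                isSupported,
//                                inputTensorInfo,
//                                outputTensorInfo,
//                                reshapeDescriptor);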

TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (numOutputs != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

bool IsValid(const TfLiteTensor* tfLiteTensor)
{
    return tfLiteTensor != nullptr;
}

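// Checked cast to unsigned: throws armnn::Exception, identifying the offending node,
// when the value is negative.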
uint32_t NonNegative(int32_t value, int nodeIndex)
{
    if (value < 0)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Negative value in node " + std::to_string(nodeIndex));
    }
    else
    {
        return static_cast<uint32_t>(value);
    }
}

bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.allocation_type == kTfLiteDynamic;
}

bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.quantization.type == kTfLiteAffineQuantization;
}

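// Wires the node's already-registered input slots into the given layer and records the
// layer's output slots in DelegateData so that downstream nodes can connect to them.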
TfLiteStatus Connect(armnn::IConnectableLayer* layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    ARMNN_ASSERT(tfLiteNode->outputs->size == layer->GetNumOutputSlots());

    // Connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
    {
        if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
        {
            data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
        }
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot;
    }

    return kTfLiteOk;
}

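// Implements TfLite-style broadcasting for binary layers: when the two inputs differ in rank,
// the lower-rank input is reshaped so its dimensions are right-aligned with the higher-rank
// input (e.g. [3, 4] against [2, 3, 4] becomes [1, 3, 4]) before both are connected to
// startLayer. Returns nullptr if the required Reshape layer is not supported by any backend.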
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference =
        std::abs(armnn::numeric_cast<int>(inputDimensions0) - armnn::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // Set the target shape before the support check so backends validate the actual reshape
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);

    // Add Reshape layer
    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[0]]->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot;
    }

    return reshapeLayer;
}

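// Appends the ArmNN Activation layer corresponding to a TfLite fused activation
// (RELU, RELU1/RELU6 as BoundedReLu, SIGMOID, TANH) after prevLayer's given output slot,
// then re-registers the node's output slots to point at the activation layer.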
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{
    armnn::IOutputSlot& outputSlot = prevLayer->GetOutputSlot(outputSlotIndex);
    const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActRelu1:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot;
    }
    return kTfLiteOk;
}

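// Maps a TfLite tensor type to the equivalent ArmNN DataType. Note that TfLite int8 maps
// to QSymmS8 when the zero point is 0 and to QAsymmS8 otherwise.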
armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
{
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            return armnn::DataType::Boolean;
        case kTfLiteFloat32:
            return armnn::DataType::Float32;
        case kTfLiteFloat16:
            return armnn::DataType::Float16;
        case kTfLiteUInt8:
            return armnn::DataType::QAsymmU8;
        case kTfLiteInt8:
            if (tfLiteTensor.params.zero_point == 0)
            {
                return armnn::DataType::QSymmS8;
            }
            else
            {
                return armnn::DataType::QAsymmS8;
            }
        case kTfLiteInt16:
            return armnn::DataType::QSymmS16;
        case kTfLiteInt32:
            return armnn::DataType::Signed32;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(tfLiteTensor.type));
    }
}

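// Builds an armnn::TensorInfo from a TfLite tensor: shape (zero-sized dimensions are marked
// unspecified, and a rank-0 tensor gets an unspecified dimensionality; at most 5 dimensions
// are assumed), data type, and quantization parameters. For per-channel (affine) quantization
// the quantization dimension is remapped through dimensionMappings, which matters when
// constant weights are permuted, e.g. for depthwise convolution.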
armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
                                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
{
    armnn::DataType type = GetDataType(tfLiteTensor);
    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
        ret = armnn::TensorInfo(tensorShape, type);
    }
    else
    {
        std::vector<unsigned int> tensorDims(tensorDimensionSize);
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        for (unsigned int i = 0; i < tensorDimensionSize; ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = dim;
        }
        armnn::TensorShape tensorShape(tensorDimensionSize, tensorDims.data(), dimensionsSpecificity);
        ret = armnn::TensorInfo(tensorShape, type);
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        if (affineQuantization->scale->size > 1)
        {
            // collect one scale per channel, starting from the first channel
            std::vector<float> quantizationScales;
            for (unsigned int i = 0; i < affineQuantization->scale->size; ++i)
            {
                quantizationScales.push_back(affineQuantization->scale->data[i]);
            }
            ret.SetQuantizationScales(quantizationScales);
            ret.SetQuantizationDim(dimensionMappings[armnn::numeric_cast<unsigned int>(
                affineQuantization->quantized_dimension)]);
        }
        else
        {
            ret.SetQuantizationScale(affineQuantization->scale->data[0]);
            ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
        }
    }
    else
    {
        auto quantizationParameters = tfLiteTensor.params;
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}

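// Wraps a constant (kTfLiteMmapRo) TfLite tensor as an armnn::ConstTensor. If a non-empty
// permutation vector and a scratch buffer are supplied, the tensor data is first permuted
// into permutationData (e.g. to reorder convolution weights) and the returned tensor
// references that buffer; otherwise it references the TfLite tensor's own data.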
armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
                                     armnn::TensorInfo& tensorInfo,
                                     armnn::Optional<armnn::PermutationVector&> permutationVector,
                                     void* permutationData = nullptr)
{
    if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " +
                               std::to_string(tfLiteTensor->allocation_type));
    }

    if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
    {
        armnnUtils::Permute(armnnUtils::Permuted(tensorInfo.GetShape(), permutationVector.value()),
                            permutationVector.value(),
                            tfLiteTensor->data.data,
                            permutationData,
                            armnn::GetDataTypeSize(tensorInfo.GetDataType()));

        return armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, permutationVector.value()), permutationData);
    }
    else
    {
        return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
    }
}

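// Computes TfLite SAME padding: the output size is ceil(inputSize / stride) and the total
// padding needed to achieve it is split as evenly as possible, with any odd element going
// to the back. For example, inputSize = 5, filterSize = 3, stride = 2, dilation = 1 gives
// outputSize = 3, total padding = (3 - 1) * 2 + 3 - 5 = 2, so paddingFront = paddingBack = 1.
// With any other padding mode both paddings stay 0.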
void CalcPadding(uint32_t inputSize,
                 uint32_t filterSize,
                 uint32_t stride,
                 uint32_t dilation,
                 uint32_t& paddingFront,
                 uint32_t& paddingBack,
                 TfLitePadding padding)
{
    paddingFront = 0;
    paddingBack = 0;
    if (padding == kTfLitePaddingSame)
    {
        uint32_t outputSize = (inputSize + stride - 1) / stride;
        uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
        uint32_t temp = (outputSize - 1) * stride + dilatedSize;
        if (temp > inputSize)
        {
            paddingFront = (temp - inputSize) / 2;
            paddingBack = (temp - inputSize) - paddingFront;
        }
    }
}

} // namespace anonymous