blob: fad07ff267766aae3d95e6a50fdcf86f4ac0720b [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <string>
21
Sadik Armagan62483be2020-10-23 17:14:43 +010022namespace
23{
24
// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
//
// Parameters:
//   funcName      - caller name (typically __func__) used only in log messages.
//   tfLiteContext - TfLiteContext* used for TF_LITE_KERNEL_LOG reporting.
//   func          - name of the ILayerSupport member function to invoke
//                   (e.g. IsReshapeSupported); __VA_ARGS__ are forwarded to it.
//   backends      - iterable of armnn::BackendId to query in order.
//   supported     - bool lvalue written with the result; the first backend that
//                   reports support wins and stops the search.
//
// Behaviour notes (all visible below):
//   - Backends without a registered layer-support object are logged and skipped.
//   - Each unsupported backend logs the backend-provided reason when non-empty.
//   - If no backend supports the layer, a final summary message is logged.
//   - InvalidArgumentException from the support query is rethrown with the
//     caller's source location attached via CHECK_LOCATION().
// NOTE: comments cannot be placed inside the macro body itself because '//'
// would swallow the line-continuation backslashes.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TF_LITE_KERNEL_LOG( \
                        tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
68
69TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
70 TfLiteNode* tfLiteNode,
71 const unsigned int expectedSize,
72 int nodeIndex)
73{
74 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000075 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010076 {
77 TF_LITE_MAYBE_KERNEL_LOG(
78 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
79 numInputs, expectedSize, nodeIndex);
80 return kTfLiteError;
81 }
82 return kTfLiteOk;
83}
84
85TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
86 TfLiteNode* tfLiteNode,
87 const unsigned int expectedSize,
88 int nodeIndex)
89{
90 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000091 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010092 {
93 TF_LITE_MAYBE_KERNEL_LOG(
94 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
95 numOutputs, expectedSize, nodeIndex);
96 return kTfLiteError;
97 }
98 return kTfLiteOk;
99}
100
Sadik Armagan6e36a642020-11-10 21:18:41 +0000101bool IsValid(const TfLiteTensor* tfLiteTensor)
102{
103 return tfLiteTensor == nullptr ? false : true;
104}
105
Sadik Armagan32ca1442020-11-13 17:51:56 +0000106uint32_t NonNegative(int32_t value, int nodeIndex)
107{
108 if (value < 0)
109 {
110 throw armnn::Exception("TfLiteArmnnDelegate: Non-negative value in node " + nodeIndex);
111 }
112 else
113 {
114 return static_cast<uint32_t>(value);
115 }
116}
117
Sadik Armagan62483be2020-10-23 17:14:43 +0100118bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
119{
120 auto tensorAllocationType = tfLiteTensor.allocation_type;
121 if (tensorAllocationType == kTfLiteDynamic)
122 {
123 return true;
124 }
125 return false;
126}
127
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000128bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
129{
130 auto quantizationInfo = tfLiteTensor.quantization;
131 if (quantizationInfo.type == kTfLiteAffineQuantization)
132 {
133 return true;
134 }
135 return false;
136}
137
Sadik Armagan67e95f22020-10-29 16:14:54 +0000138TfLiteStatus Connect(armnn::IConnectableLayer* layer,
139 TfLiteNode* tfLiteNode,
140 armnnDelegate::DelegateData& data)
141{
Finn Williams6f9f9902020-11-13 13:23:15 +0000142 ARMNN_ASSERT(static_cast<unsigned int >(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000143
144 // Connect the input slots
145 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
146 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000147 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
148 {
149 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
150 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000151 }
152
153 // Prepare output slots
154 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
155 {
156 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000157 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000158 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000159
Sadik Armagan67e95f22020-10-29 16:14:54 +0000160 return kTfLiteOk;
161}
162
// Prepares a binary (broadcastable) operation whose two inputs may differ in
// rank. If the ranks match, 'startLayer' is simply connected via Connect().
// Otherwise the lower-rank input is reshaped up to the higher rank by
// prepending 1-dimensions, and a Reshape layer is inserted between that input
// and 'startLayer'.
// Returns: startLayer (equal ranks, connected OK), the inserted reshape layer
// (broadcast path), or nullptr when connection fails or the reshape is not
// supported by any backend.
armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
                                          const armnn::TensorInfo& inputInfo1,
                                          armnn::IConnectableLayer* startLayer,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          armnnDelegate::DelegateData& delegateData)
{
    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    // Equal ranks: no broadcast reshape needed, just wire up startLayer.
    if (inputDimensions0 == inputDimensions1)
    {
        auto status = Connect(startLayer, tfLiteNode, delegateData);
        return status == kTfLiteOk ? startLayer : nullptr;
    }

    unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
                                                                    armnn::numeric_cast<int>(inputDimensions1)));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
    const armnn::TensorShape& smallShape = smallInfo.GetShape();

    // Build the broadcast shape: leading dims are 1, trailing dims copy the
    // smaller input's shape (right-aligned, as in numpy-style broadcasting).
    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
    {
        reshapedDimensions[i] = smallShape[i - dimDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // Ask the backends whether the reshape itself is supported before adding it.
    armnn::ReshapeDescriptor reshapeDescriptor;
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsReshapeSupported,
                               delegateData.m_Backends,
                               isSupported,
                               smallInfo,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return nullptr;
    }

    ARMNN_ASSERT(delegateData.m_Network != nullptr);
    // Add Reshape layer
    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
    ARMNN_ASSERT(reshapeLayer != nullptr);
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

    // Route the smaller input through the reshape layer and the larger input
    // directly into startLayer, preserving input-slot order (0/1).
    if (input0IsSmaller)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
            ->Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
            ->Connect(startLayer->GetInputSlot(0));
    }

    // Prepare output slots
    // Register startLayer's outputs (not the reshape layer's) so downstream
    // nodes consume the operation's result.
    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode
            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }

    return reshapeLayer;
}
247
// Appends an ArmNN Activation layer implementing the TfLite fused activation
// 'activationType' after 'prevLayer', then rewires the delegate's
// output-slot table so downstream nodes consume the activation's output.
// Returns kTfLiteOk on success (or immediately when no activation is
// requested), kTfLiteError for unsupported activation types or when no
// backend supports the activation.
TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
                             TfLiteNode* tfLiteNode,
                             TfLiteFusedActivation activationType,
                             armnn::IConnectableLayer* prevLayer,
                             unsigned int outputSlotIndex,
                             armnnDelegate::DelegateData& data)
{

    const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();

    armnn::ActivationDescriptor activationDesc;

    // Map the TfLite fused-activation enum onto an ArmNN activation function.
    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
        case kTfLiteActRelu1:
        {
            // BoundedReLu clamps to [m_B, m_A], here [-1, 1].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            // BoundedReLu clamps to [0, 6].
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            // Any other fused activation is not mapped here.
            return kTfLiteError;
    }

    bool isSupported = false;
    // NOTE(review): the support query reads prevLayer->GetOutputSlot(0) while
    // the output info above uses outputSlotIndex — presumably fine for
    // single-output layers; confirm for multi-output producers.
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               tfLiteContext,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               activationOutputInfo,
                               activationDesc);
    if (!isSupported)
    {
        return kTfLiteError;
    }
    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);

    ARMNN_ASSERT(activationLayer != nullptr);
    activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);

    // Connect and prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
    {
        // The table entry at this node-output index is expected to hold the
        // producing layer's output slot: connect it into the activation, then
        // overwrite the entry so consumers read the activation's output.
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
        armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[static_cast<unsigned long>(
            tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}
331
Sadik Armagan6e36a642020-11-10 21:18:41 +0000332armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100333{
Sadik Armagan62483be2020-10-23 17:14:43 +0100334 switch (tfLiteTensor.type)
335 {
336 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000337 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100338 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000339 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100340 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000341 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100342 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000343 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100344 case kTfLiteInt8:
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000345 {
346 auto quantizationInfo = tfLiteTensor.quantization;
347 if (quantizationInfo.type == kTfLiteAffineQuantization)
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000348 {
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000349 auto* quantization =
350 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
351 if (quantization->zero_point != nullptr && quantization->zero_point->size == 1)
352 {
353 return armnn::DataType::QAsymmS8;
354 }
355 else
356 {
357 return armnn::DataType::QSymmS8;
358 }
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000359 }
360 else
361 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000362 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000363 }
Sadik Armagan15f7fae2020-11-18 09:37:03 +0000364 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100365 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000366 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100367 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000368 return armnn::DataType::Signed32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100369 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000370 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100371 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000372}
Sadik Armagan62483be2020-10-23 17:14:43 +0100373
Sadik Armagan32ca1442020-11-13 17:51:56 +0000374armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
375 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Sadik Armagan6e36a642020-11-10 21:18:41 +0000376{
377 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100378 armnn::TensorInfo ret;
379 auto tensorDimensionSize = tfLiteTensor.dims->size;
380 if (tensorDimensionSize == 0)
381 {
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000382 if(tflite::IsConstantTensor(&tfLiteTensor))
383 {
384 std::vector<unsigned int> safeShape = { 1 };
385 bool dimensionsSpecificity[1] = { true };
386 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
387 safeShape.data(),
388 dimensionsSpecificity);
389 ret = armnn::TensorInfo(tensorShape, type);
390 }
391 else
392 {
393 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
394 ret = armnn::TensorInfo(tensorShape, type);
395 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100396 }
397 else
398 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000399 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100400 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000401 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100402 auto dim = tfLiteTensor.dims->data[i];
403 if (dim == 0)
404 {
405 dimensionsSpecificity[i] = false;
406 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000407 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100408 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000409 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
410 tensorDims.data(),
411 dimensionsSpecificity);
Sadik Armagan62483be2020-10-23 17:14:43 +0100412 ret = armnn::TensorInfo(tensorShape, type);
413 }
414
415 auto quantizationInfo = tfLiteTensor.quantization;
416 if (quantizationInfo.type == kTfLiteAffineQuantization)
417 {
418 // get per-channel quantization parameters
419 const auto* affineQuantization =
420 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000421 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100422 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000423 std::vector<float> quantizationScales;
Finn Williams6f9f9902020-11-13 13:23:15 +0000424 for (unsigned int i = 1; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000425 {
426 quantizationScales.push_back(affineQuantization->scale->data[i]);
427 }
428 ret.SetQuantizationScales(quantizationScales);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000429 ret.SetQuantizationDim(dimensionMappings[armnn::numeric_cast<unsigned int>(
430 affineQuantization->quantized_dimension)]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100431 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000432 else
433 {
434 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
435 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
436 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100437 }
438 else
439 {
440 auto quantizationParameters = tfLiteTensor.params;
441 ret.SetQuantizationScale(quantizationParameters.scale);
442 ret.SetQuantizationOffset(quantizationParameters.zero_point);
443 }
444
445 return ret;
446}
447
Sadik Armagan4189cc52020-11-11 18:01:48 +0000448armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
449 armnn::TensorInfo& tensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000450 armnn::Optional<armnn::PermutationVector&> permutationVector,
451 void* permutationData = nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000452{
Sadik Armagan4189cc52020-11-11 18:01:48 +0000453 if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
454 {
455 throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " + tfLiteTensor->allocation_type);
456 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000457
Sadik Armagan32ca1442020-11-13 17:51:56 +0000458 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000459 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000460 armnnUtils::Permute(armnnUtils::Permuted(tensorInfo.GetShape(), permutationVector.value()),
Sadik Armagan6e36a642020-11-10 21:18:41 +0000461 permutationVector.value(),
Sadik Armagan4189cc52020-11-11 18:01:48 +0000462 tfLiteTensor->data.data,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000463 permutationData,
Sadik Armagan4189cc52020-11-11 18:01:48 +0000464 armnn::GetDataTypeSize(tensorInfo.GetDataType()));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000465
466 return armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, permutationVector.value()), permutationData);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000467 }
468 else
469 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000470 return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000471 }
472}
473
Sadik Armagan32ca1442020-11-13 17:51:56 +0000474void CalcPadding(uint32_t inputSize,
475 uint32_t filterSize,
476 uint32_t stride,
477 uint32_t dilation,
478 uint32_t& paddingFront,
479 uint32_t& paddingBack,
480 TfLitePadding padding)
481{
482 paddingFront = 0;
483 paddingBack = 0;
484 if (padding == kTfLitePaddingSame)
485 {
486 uint32_t outputSize = (inputSize + stride - 1) / stride;
487 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
488 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
489 if (temp > inputSize)
490 {
491 paddingFront = (temp - inputSize) / 2;
492 paddingBack = (temp - inputSize) - paddingFront;
493 }
494 }
495}
496
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000497TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
498 armnn::TensorInfo& constTensorInfo,
499 TfLiteContext* tfLiteContext,
500 const TfLiteTensor& tfLiteTensor,
501 armnnDelegate::DelegateData& data,
502 unsigned int slotIndex)
503{
504 bool isSupported = false;
505 FORWARD_LAYER_SUPPORT_FUNC(__func__,
506 tfLiteContext,
507 IsConstantSupported,
508 data.m_Backends,
509 isSupported,
510 constTensorInfo);
511 if (!isSupported)
512 {
513 return kTfLiteError;
514 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000515
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000516 auto constantInput = CreateConstTensor(&tfLiteTensor,
517 constTensorInfo,
518 armnn::Optional<armnn::PermutationVector&>());
519 armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
520 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
521 outputSlot.SetTensorInfo(constTensorInfo);
522
523 data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;
524
525 return kTfLiteOk;
526}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000527
Sadik Armagan62483be2020-10-23 17:14:43 +0100528} // namespace anonymous