blob: 0537ba911b24adbdc4028127ff5dd572fe62c7d1 [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <armnnUtils/Permute.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

#include "tensorflow/lite/kernels/kernel_util.h"

#include <memory>
#include <string>
#include <vector>
21
Sadik Armagan62483be2020-10-23 17:14:43 +010022namespace
23{
24
25// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
26#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
27try \
28{ \
29 for (auto&& backendId : backends) \
30 { \
31 auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
32 if (layerSupportObject) \
33 { \
34 std::string reasonIfUnsupported; \
35 supported = \
36 layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
37 if (supported) \
38 { \
39 break; \
40 } \
41 else \
42 { \
43 if (reasonIfUnsupported.size() > 0) \
44 { \
45 TF_LITE_KERNEL_LOG( \
46 tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
47 } \
48 else \
49 { \
50 TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
51 } \
52 } \
53 } \
54 else \
55 { \
56 TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
57 } \
58 } \
59 if (!supported) \
60 { \
61 TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
62 } \
63} \
64catch (const armnn::InvalidArgumentException &e) \
65{ \
66 throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
67}
68
69TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
70 TfLiteNode* tfLiteNode,
71 const unsigned int expectedSize,
72 int nodeIndex)
73{
74 auto numInputs = tfLiteNode->inputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000075 if (static_cast<unsigned int >(numInputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010076 {
77 TF_LITE_MAYBE_KERNEL_LOG(
78 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
79 numInputs, expectedSize, nodeIndex);
80 return kTfLiteError;
81 }
82 return kTfLiteOk;
83}
84
85TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
86 TfLiteNode* tfLiteNode,
87 const unsigned int expectedSize,
88 int nodeIndex)
89{
90 auto numOutputs = tfLiteNode->outputs->size;
Finn Williams6f9f9902020-11-13 13:23:15 +000091 if (static_cast<unsigned int >(numOutputs) != expectedSize)
Sadik Armagan62483be2020-10-23 17:14:43 +010092 {
93 TF_LITE_MAYBE_KERNEL_LOG(
94 tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
95 numOutputs, expectedSize, nodeIndex);
96 return kTfLiteError;
97 }
98 return kTfLiteOk;
99}
100
Sadik Armagan6e36a642020-11-10 21:18:41 +0000101bool IsValid(const TfLiteTensor* tfLiteTensor)
102{
103 return tfLiteTensor == nullptr ? false : true;
104}
105
Sadik Armagan32ca1442020-11-13 17:51:56 +0000106uint32_t NonNegative(int32_t value, int nodeIndex)
107{
108 if (value < 0)
109 {
110 throw armnn::Exception("TfLiteArmnnDelegate: Non-negative value in node " + nodeIndex);
111 }
112 else
113 {
114 return static_cast<uint32_t>(value);
115 }
116}
117
Sadik Armagan62483be2020-10-23 17:14:43 +0100118bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
119{
120 auto tensorAllocationType = tfLiteTensor.allocation_type;
121 if (tensorAllocationType == kTfLiteDynamic)
122 {
123 return true;
124 }
125 return false;
126}
127
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000128bool IsAffineQuantization(const TfLiteTensor& tfLiteTensor)
129{
130 auto quantizationInfo = tfLiteTensor.quantization;
131 if (quantizationInfo.type == kTfLiteAffineQuantization)
132 {
133 return true;
134 }
135 return false;
136}
137
Sadik Armagan67e95f22020-10-29 16:14:54 +0000138TfLiteStatus Connect(armnn::IConnectableLayer* layer,
139 TfLiteNode* tfLiteNode,
140 armnnDelegate::DelegateData& data)
141{
Finn Williams6f9f9902020-11-13 13:23:15 +0000142 ARMNN_ASSERT(static_cast<unsigned int >(tfLiteNode->outputs->size) == layer->GetNumOutputSlots());
Sadik Armagan67e95f22020-10-29 16:14:54 +0000143
144 // Connect the input slots
145 for (unsigned int inputIndex = 0; inputIndex < layer->GetNumInputSlots(); ++inputIndex)
146 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000147 if (data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]] != nullptr)
148 {
149 data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer->GetInputSlot(inputIndex));
150 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000151 }
152
153 // Prepare output slots
154 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
155 {
156 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000157 data.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000158 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000159
Sadik Armagan67e95f22020-10-29 16:14:54 +0000160 return kTfLiteOk;
161}
162
163armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
164 const armnn::TensorInfo& inputInfo1,
165 armnn::IConnectableLayer* startLayer,
166 TfLiteContext* tfLiteContext,
167 TfLiteNode* tfLiteNode,
168 armnnDelegate::DelegateData& delegateData)
169{
170 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
171 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
172
173 if (inputDimensions0 == inputDimensions1)
174 {
175 auto status = Connect(startLayer, tfLiteNode, delegateData);
Sadik Armagan8b9858d2020-11-09 08:26:22 +0000176 return status == kTfLiteOk ? startLayer : nullptr;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000177 }
178
179 unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
Finn Williams6f9f9902020-11-13 13:23:15 +0000180 unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
181 armnn::numeric_cast<int>(inputDimensions1)));
Sadik Armagan67e95f22020-10-29 16:14:54 +0000182
183 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
184 const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
185 const armnn::TensorShape& smallShape = smallInfo.GetShape();
186
187 std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
188 for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
189 {
190 reshapedDimensions[i] = smallShape[i - dimDifference];
191 }
192
193 armnn::TensorInfo reshapedInfo = smallInfo;
194 reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
195 reshapedDimensions.data() });
196
197 armnn::ReshapeDescriptor reshapeDescriptor;
198 bool isSupported = false;
199 FORWARD_LAYER_SUPPORT_FUNC(__func__,
200 tfLiteContext,
201 IsReshapeSupported,
202 delegateData.m_Backends,
203 isSupported,
204 smallInfo,
205 reshapedInfo,
206 reshapeDescriptor);
207 if (!isSupported)
208 {
209 return nullptr;
210 }
211
212 ARMNN_ASSERT(delegateData.m_Network != nullptr);
213 // Add Reshape layer
214 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
215
216 armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
217 ARMNN_ASSERT(reshapeLayer != nullptr);
218 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
219
220 if (input0IsSmaller)
221 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000222 delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
223 ->Connect(reshapeLayer->GetInputSlot(0));
Sadik Armagan67e95f22020-10-29 16:14:54 +0000224 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
Finn Williams6f9f9902020-11-13 13:23:15 +0000225 delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
226 ->Connect(startLayer->GetInputSlot(1));
Sadik Armagan67e95f22020-10-29 16:14:54 +0000227 }
228 else
229 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000230 delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
231 ->Connect(reshapeLayer->GetInputSlot(0));
Sadik Armagan67e95f22020-10-29 16:14:54 +0000232 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
Finn Williams6f9f9902020-11-13 13:23:15 +0000233 delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
234 ->Connect(startLayer->GetInputSlot(0));
Sadik Armagan67e95f22020-10-29 16:14:54 +0000235 }
236
237 // Prepare output slots
238 for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
239 {
240 armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000241 delegateData.m_OutputSlotForNode
242 [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000243 }
244
245 return reshapeLayer;
246}
247
248TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
249 TfLiteNode* tfLiteNode,
250 TfLiteFusedActivation activationType,
251 armnn::IConnectableLayer* prevLayer,
252 unsigned int outputSlotIndex,
253 armnnDelegate::DelegateData& data)
254{
255
Finn Williams6f9f9902020-11-13 13:23:15 +0000256 const armnn::TensorInfo& activationOutputInfo = prevLayer->GetOutputSlot(outputSlotIndex).GetTensorInfo();
Sadik Armagan67e95f22020-10-29 16:14:54 +0000257
258 armnn::ActivationDescriptor activationDesc;
259
260 switch (activationType)
261 {
262 case kTfLiteActNone:
263 {
264 // No Activation
265 return kTfLiteOk;
266 }
267 case kTfLiteActRelu:
268 {
269 activationDesc.m_Function = armnn::ActivationFunction::ReLu;
270 break;
271 }
272 case kTfLiteActRelu1:
273 {
274 activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
275 activationDesc.m_A = 1.0f;
276 activationDesc.m_B = -1.0f;
277 break;
278 }
279 case kTfLiteActRelu6:
280 {
281 activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
282 activationDesc.m_A = 6.0f;
283 activationDesc.m_B = 0.0f;
284 break;
285 }
286 case kTfLiteActSigmoid:
287 {
288 activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
289 break;
290 }
291 case kTfLiteActTanh:
292 {
293 activationDesc.m_Function = armnn::ActivationFunction::TanH;
294 activationDesc.m_A = 1.0f;
295 activationDesc.m_B = 1.0f;
296 break;
297 }
298 default:
299 return kTfLiteError;
300 }
301
302 bool isSupported = false;
303 FORWARD_LAYER_SUPPORT_FUNC(__func__,
304 tfLiteContext,
305 IsActivationSupported,
306 data.m_Backends,
307 isSupported,
308 prevLayer->GetOutputSlot(0).GetTensorInfo(),
309 activationOutputInfo,
310 activationDesc);
311 if (!isSupported)
312 {
313 return kTfLiteError;
314 }
315 armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(activationDesc);
316
317 ARMNN_ASSERT(activationLayer != nullptr);
318 activationLayer->GetOutputSlot(0).SetTensorInfo(activationOutputInfo);
319
320 // Connect and prepare output slots
321 for (unsigned int outputIndex = 0; outputIndex < activationLayer->GetNumOutputSlots(); ++outputIndex)
322 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000323 data.m_OutputSlotForNode[static_cast<unsigned long>(
324 tfLiteNode->outputs->data[outputIndex])]->Connect(activationLayer->GetInputSlot(0));
Sadik Armagan67e95f22020-10-29 16:14:54 +0000325 armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(outputIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +0000326 data.m_OutputSlotForNode[static_cast<unsigned long>(
327 tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000328 }
329 return kTfLiteOk;
330}
331
Sadik Armagan6e36a642020-11-10 21:18:41 +0000332armnn::DataType GetDataType(const TfLiteTensor& tfLiteTensor)
Sadik Armagan62483be2020-10-23 17:14:43 +0100333{
Sadik Armagan62483be2020-10-23 17:14:43 +0100334 switch (tfLiteTensor.type)
335 {
336 case kTfLiteBool:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000337 return armnn::DataType::Boolean;
Sadik Armagan62483be2020-10-23 17:14:43 +0100338 case kTfLiteFloat32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000339 return armnn::DataType::Float32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100340 case kTfLiteFloat16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000341 return armnn::DataType::Float16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100342 case kTfLiteUInt8:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000343 return armnn::DataType::QAsymmU8;
Sadik Armagan62483be2020-10-23 17:14:43 +0100344 case kTfLiteInt8:
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000345 if (tfLiteTensor.params.zero_point == 0)
346 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000347 return armnn::DataType::QSymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000348 }
349 else
350 {
Sadik Armagan6e36a642020-11-10 21:18:41 +0000351 return armnn::DataType::QAsymmS8;
Narumol Prangnawarat50c87d32020-11-09 18:42:11 +0000352 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100353 case kTfLiteInt16:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000354 return armnn::DataType::QSymmS16;
Sadik Armagan62483be2020-10-23 17:14:43 +0100355 case kTfLiteInt32:
Sadik Armagan6e36a642020-11-10 21:18:41 +0000356 return armnn::DataType::Signed32;
Sadik Armagan62483be2020-10-23 17:14:43 +0100357 default:
Finn Williams6f9f9902020-11-13 13:23:15 +0000358 throw armnn::Exception(&"TfLiteArmnnDelegate: Unsupported data type: " [ tfLiteTensor.type]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100359 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000360}
Sadik Armagan62483be2020-10-23 17:14:43 +0100361
Sadik Armagan32ca1442020-11-13 17:51:56 +0000362armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor,
363 const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
Sadik Armagan6e36a642020-11-10 21:18:41 +0000364{
365 armnn::DataType type = GetDataType(tfLiteTensor);
Sadik Armagan62483be2020-10-23 17:14:43 +0100366 armnn::TensorInfo ret;
367 auto tensorDimensionSize = tfLiteTensor.dims->size;
368 if (tensorDimensionSize == 0)
369 {
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000370 if(tflite::IsConstantTensor(&tfLiteTensor))
371 {
372 std::vector<unsigned int> safeShape = { 1 };
373 bool dimensionsSpecificity[1] = { true };
374 armnn::TensorShape tensorShape(armnn::numeric_cast<unsigned int>(safeShape.size()),
375 safeShape.data(),
376 dimensionsSpecificity);
377 ret = armnn::TensorInfo(tensorShape, type);
378 }
379 else
380 {
381 armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
382 ret = armnn::TensorInfo(tensorShape, type);
383 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100384 }
385 else
386 {
Finn Williams6f9f9902020-11-13 13:23:15 +0000387 std::vector<unsigned int> tensorDims(static_cast<unsigned int>(tensorDimensionSize));
Sadik Armagan62483be2020-10-23 17:14:43 +0100388 bool dimensionsSpecificity[5] = { true, true, true, true, true };
Finn Williams6f9f9902020-11-13 13:23:15 +0000389 for (unsigned int i = 0; i < static_cast<unsigned int>(tensorDimensionSize); ++i) {
Sadik Armagan62483be2020-10-23 17:14:43 +0100390 auto dim = tfLiteTensor.dims->data[i];
391 if (dim == 0)
392 {
393 dimensionsSpecificity[i] = false;
394 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000395 tensorDims[i] = static_cast<unsigned int>(dim);
Sadik Armagan62483be2020-10-23 17:14:43 +0100396 }
Finn Williams6f9f9902020-11-13 13:23:15 +0000397 armnn::TensorShape tensorShape(static_cast<unsigned int>(tensorDimensionSize),
398 tensorDims.data(),
399 dimensionsSpecificity);
Sadik Armagan62483be2020-10-23 17:14:43 +0100400 ret = armnn::TensorInfo(tensorShape, type);
401 }
402
403 auto quantizationInfo = tfLiteTensor.quantization;
404 if (quantizationInfo.type == kTfLiteAffineQuantization)
405 {
406 // get per-channel quantization parameters
407 const auto* affineQuantization =
408 reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
Sadik Armagan67e95f22020-10-29 16:14:54 +0000409 if (affineQuantization->scale->size > 1)
Sadik Armagan62483be2020-10-23 17:14:43 +0100410 {
Sadik Armagan67e95f22020-10-29 16:14:54 +0000411 std::vector<float> quantizationScales;
Finn Williams6f9f9902020-11-13 13:23:15 +0000412 for (unsigned int i = 1; i < static_cast<unsigned int>(affineQuantization->scale->size); ++i)
Sadik Armagan67e95f22020-10-29 16:14:54 +0000413 {
414 quantizationScales.push_back(affineQuantization->scale->data[i]);
415 }
416 ret.SetQuantizationScales(quantizationScales);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000417 ret.SetQuantizationDim(dimensionMappings[armnn::numeric_cast<unsigned int>(
418 affineQuantization->quantized_dimension)]);
Sadik Armagan62483be2020-10-23 17:14:43 +0100419 }
Sadik Armagan67e95f22020-10-29 16:14:54 +0000420 else
421 {
422 ret.SetQuantizationScale(affineQuantization->scale->data[0]);
423 ret.SetQuantizationOffset(affineQuantization->zero_point->data[0]);
424 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100425 }
426 else
427 {
428 auto quantizationParameters = tfLiteTensor.params;
429 ret.SetQuantizationScale(quantizationParameters.scale);
430 ret.SetQuantizationOffset(quantizationParameters.zero_point);
431 }
432
433 return ret;
434}
435
Sadik Armagan4189cc52020-11-11 18:01:48 +0000436armnn::ConstTensor CreateConstTensor(const TfLiteTensor* tfLiteTensor,
437 armnn::TensorInfo& tensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000438 armnn::Optional<armnn::PermutationVector&> permutationVector,
439 void* permutationData = nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000440{
Sadik Armagan4189cc52020-11-11 18:01:48 +0000441 if (tfLiteTensor->allocation_type != kTfLiteMmapRo)
442 {
443 throw armnn::Exception("TfLiteArmnnDelegate: Not constant allocation type: " + tfLiteTensor->allocation_type);
444 }
Sadik Armagan6e36a642020-11-10 21:18:41 +0000445
Sadik Armagan32ca1442020-11-13 17:51:56 +0000446 if (permutationVector.has_value() && permutationVector.value().GetSize() > 0 && permutationData != nullptr)
Sadik Armagan6e36a642020-11-10 21:18:41 +0000447 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000448 armnnUtils::Permute(armnnUtils::Permuted(tensorInfo.GetShape(), permutationVector.value()),
Sadik Armagan6e36a642020-11-10 21:18:41 +0000449 permutationVector.value(),
Sadik Armagan4189cc52020-11-11 18:01:48 +0000450 tfLiteTensor->data.data,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000451 permutationData,
Sadik Armagan4189cc52020-11-11 18:01:48 +0000452 armnn::GetDataTypeSize(tensorInfo.GetDataType()));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000453
454 return armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, permutationVector.value()), permutationData);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000455 }
456 else
457 {
Sadik Armagan4189cc52020-11-11 18:01:48 +0000458 return armnn::ConstTensor(tensorInfo, tfLiteTensor->data.data);
Sadik Armagan6e36a642020-11-10 21:18:41 +0000459 }
460}
461
Sadik Armagan32ca1442020-11-13 17:51:56 +0000462void CalcPadding(uint32_t inputSize,
463 uint32_t filterSize,
464 uint32_t stride,
465 uint32_t dilation,
466 uint32_t& paddingFront,
467 uint32_t& paddingBack,
468 TfLitePadding padding)
469{
470 paddingFront = 0;
471 paddingBack = 0;
472 if (padding == kTfLitePaddingSame)
473 {
474 uint32_t outputSize = (inputSize + stride - 1) / stride;
475 uint32_t dilatedSize = filterSize + (dilation - 1) * (filterSize - 1);
476 uint32_t temp = (outputSize - 1) * stride + dilatedSize;
477 if (temp > inputSize)
478 {
479 paddingFront = (temp - inputSize) / 2;
480 paddingBack = (temp - inputSize) - paddingFront;
481 }
482 }
483}
484
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000485TfLiteStatus ConnectConstant(armnn::IConnectableLayer* layer,
486 armnn::TensorInfo& constTensorInfo,
487 TfLiteContext* tfLiteContext,
488 const TfLiteTensor& tfLiteTensor,
489 armnnDelegate::DelegateData& data,
490 unsigned int slotIndex)
491{
492 bool isSupported = false;
493 FORWARD_LAYER_SUPPORT_FUNC(__func__,
494 tfLiteContext,
495 IsConstantSupported,
496 data.m_Backends,
497 isSupported,
498 constTensorInfo);
499 if (!isSupported)
500 {
501 return kTfLiteError;
502 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000503
Sadik Armagan05e9fd22020-11-17 12:01:47 +0000504 auto constantInput = CreateConstTensor(&tfLiteTensor,
505 constTensorInfo,
506 armnn::Optional<armnn::PermutationVector&>());
507 armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(constantInput);
508 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
509 outputSlot.SetTensorInfo(constTensorInfo);
510
511 data.m_OutputSlotForNode[static_cast<unsigned long>(slotIndex)] = &outputSlot;
512
513 return kTfLiteOk;
514}
Sadik Armagan32ca1442020-11-13 17:51:56 +0000515
Sadik Armagan62483be2020-10-23 17:14:43 +0100516} // namespace anonymous