//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{

TfLiteStatus VisitPadOperator(DelegateData& delegateData,
                              TfLiteContext* tfLiteContext,
                              TfLiteNode* tfLiteNode,
                              int nodeIndex,
                              int32_t tfLitePadOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

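    // PAD carries 2 inputs (input, paddings); PADV2 carries 3 (input, paddings, constant_values).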
    switch(tfLitePadOperatorCode)
    {
        case kTfLiteBuiltinPad:
            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
            break;
        case kTfLiteBuiltinPadv2:
            TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
            break;
        default:
            return kTfLiteError;
    }

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    const TfLiteTensor& tfLitepaddingTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];

    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLitePadOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLitePadOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    // Get the padding data from the paddings tensor
    auto* paddingData = tflite::GetTensorData<int32_t>(&tfLitepaddingTensor);

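    // The paddings tensor is laid out as [inputRank, 2]: one (before, after) pair of pad sizes per input dimension.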
    size_t step = 2;
    armnn::PadDescriptor descriptor;
    for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
    {
        descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
    }

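    // TfLite pads quantized PAD inputs with the zero point, so mirror that by using the input's quantization offset.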
    if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
    {
        descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
    }
    else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
    {
        const TfLiteTensor& tfLitepaddingValue = tfLiteTensors[tfLiteNode->inputs->data[2]];
        armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteTensor(tfLitepaddingValue);
        if (paddingValueTensorInfo.GetNumElements() != 1)
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Multiple padding values are not supported in operator #%d node #%d: ",
                tfLitePadOperatorCode, nodeIndex);
            return kTfLiteError;
        }
        // Get the padding value from the constant value tensor (PADV2's third input)
        switch (tfLitepaddingValue.type)
        {
            case kTfLiteFloat32:
                descriptor.m_PadValue = tflite::GetTensorData<float>(&tfLitepaddingValue)[0];
                break;
            case kTfLiteUInt8:
                descriptor.m_PadValue = tflite::GetTensorData<uint8_t>(&tfLitepaddingValue)[0];
                break;
            case kTfLiteInt8:
                descriptor.m_PadValue = tflite::GetTensorData<int8_t>(&tfLitepaddingValue)[0];
                break;
            default:
                TF_LITE_MAYBE_KERNEL_LOG(
                    tfLiteContext,
                    "TfLiteArmnnDelegate: Padding value datatype is not supported in operator #%d node #%d: ",
                    tfLitePadOperatorCode, nodeIndex);
                return kTfLiteError;
        }
    }

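    // With no network to add layers to, this call is only checking whether the backends support the Pad layer.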
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsPadSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor);

        return isSupported ? kTfLiteOk : kTfLiteError;
    }

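    // Add the Pad layer to the network being built and set the output tensor info on its output slot.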
    armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor);
    ARMNN_ASSERT(padLayer != nullptr);

    armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(padLayer, tfLiteNode, delegateData);
}

} // namespace armnnDelegate
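
// A minimal usage sketch, assuming the delegate dispatches on the node's TfLite builtin code
// (names such as tfLiteRegistration are illustrative; the actual dispatch lives in the delegate's
// node visitor and may differ):
//
//     switch (tfLiteRegistration->builtin_code)
//     {
//         case kTfLiteBuiltinPad:
//             return VisitPadOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPad);
//         case kTfLiteBuiltinPadv2:
//             return VisitPadOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, kTfLiteBuiltinPadv2);
//         default:
//             return kTfLiteError;
//     }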