//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>
10namespace armnnOpaqueDelegate
11{
12
13TfLiteStatus VisitPadOperator(DelegateData& delegateData,
14 TfLiteOpaqueContext* tfLiteContext,
15 TfLiteOpaqueNode* tfLiteNode,
16 int nodeIndex,
17 int32_t tfLitePadOperatorCode)
18{
19 switch(tfLitePadOperatorCode)
20 {
21 case kTfLiteBuiltinMirrorPad:
22 case kTfLiteBuiltinPad:
23 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
24 break;
25 case kTfLiteBuiltinPadv2:
26 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
27 break;
28 default:
29 return kTfLiteError;
30 }
31
32 // Inputs
33 int numInputs = 0;
34 const int* inputTensors;
35 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
36 {
37 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
38 tfLiteContext,
39 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
40 nodeIndex);
41 return kTfLiteError;
42 }
43
44 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
45 if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLitePadOperatorCode, nodeIndex))
46 {
47 return kTfLiteError;
48 }
49
50 const TfLiteOpaqueTensor* tfLitePaddingTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
51 if (!IsValid(tfLiteContext, tfLitePaddingTensor, tfLitePadOperatorCode, nodeIndex))
52 {
53 return kTfLiteError;
54 }
55
56 // Output
57 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
58
59 int numOutputs = 0;
60 const int* outputTensors;
61 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
62 {
63 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
64 tfLiteContext,
65 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
66 nodeIndex);
67 return kTfLiteError;
68 }
69
70 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
71 if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePadOperatorCode, nodeIndex))
72 {
73 return kTfLiteError;
74 }
75
76 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
77 const armnn::TensorInfo& paddingTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePaddingTensor);
78 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
79
80 // Get the padding data from the input tensor
81 auto* paddingData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLitePaddingTensor));
82
83 size_t step = 2;
84 armnn::PadDescriptor descriptor;
85 for (unsigned int i = 0; i < paddingTensorInfo.GetNumElements() / step; ++i)
86 {
87 descriptor.m_PadList.emplace_back(paddingData[i * step], paddingData[i * step + 1]);
88 }
89
90 if (tfLitePadOperatorCode == kTfLiteBuiltinPad && inputTensorInfo.IsQuantized())
91 {
92 descriptor.m_PadValue = inputTensorInfo.GetQuantizationOffset();
93 }
94 else if (tfLitePadOperatorCode == kTfLiteBuiltinPadv2)
95 {
96 const TfLiteOpaqueTensor* tfLitepaddingValue = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
97 inputTensors[2]);
98 armnn::TensorInfo paddingValueTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitepaddingValue);
99 if (paddingValueTensorInfo.GetNumElements() != 1)
100 {
101 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
102 tfLiteContext,
103 "TfLiteArmnnOpaqueDelegate: Multiple padding value are not supported in operator #%d node #%d: ",
104 tfLitePadOperatorCode, nodeIndex);
105 return kTfLiteError;
106 }
107 // Get the padding value from the input tensor
108 switch (TfLiteOpaqueTensorType(tfLitepaddingValue))
109 {
110 case kTfLiteFloat32:
111 descriptor.m_PadValue = static_cast<float*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
112 break;
113 case kTfLiteUInt8:
114 descriptor.m_PadValue = static_cast<uint8_t*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
115 break;
116 case kTfLiteInt8:
117 descriptor.m_PadValue = static_cast<int8_t*>(TfLiteOpaqueTensorData(tfLitepaddingValue))[0];
118 break;
119 default:
120 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
121 tfLiteContext,
122 "TfLiteArmnnOpaqueDelegate: Padding value datatype is not supported in operator #%d node #%d: ",
123 tfLitePadOperatorCode, nodeIndex);
124 return kTfLiteError;
125 }
126 }
127 else if (tfLitePadOperatorCode == kTfLiteBuiltinMirrorPad)
128 {
129 auto* options = reinterpret_cast<TfLiteMirrorPaddingParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
130
131 if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect)
132 {
133 descriptor.m_PaddingMode = armnn::PaddingMode::Reflect;
134 }
135 else if (options->mode == TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric)
136 {
137 descriptor.m_PaddingMode = armnn::PaddingMode::Symmetric;
138 }
139 else
140 {
141 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
142 tfLiteContext,
143 "TfLiteArmnnOpaqueDelegate: PaddingMode must be either REFLECT or SYMMETRIC "
144 "in operator #%d node #%d: ",
145 tfLitePadOperatorCode, nodeIndex);
146 }
147
148 // If padding mode is Reflect then both paddings must be no greater than inputShape(i) - 1.
149 // If padding mode is Symmetric then both paddings must be no greater than inputShape(i).
150 auto inputShape = inputTensorInfo.GetShape();
151 auto padList = descriptor.m_PadList;
152
153 const auto isReflect = static_cast<unsigned int>(descriptor.m_PaddingMode == armnn::PaddingMode::Reflect);
154 for(unsigned int i = 0; i < padList.size(); ++i)
155 {
156 if(padList.at(i).first > (inputShape[i] - isReflect) ||
157 padList.at(i).second > (inputShape[i] - isReflect))
158 {
159 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
160 tfLiteContext,
161 "TfLiteArmnnOpaqueDelegate: Padding values must be less (Reflect) or "
162 "equal (Symmetric) to the dimension size in operator #%d node #%d: ",
163 tfLitePadOperatorCode, nodeIndex);
164 }
165 }
166 }
167
168 armnn::BackendId setBackend;
169 if (!delegateData.m_Network)
170 {
171 bool isSupported = false;
172 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("PAD",
173 tfLiteContext,
174 IsPadSupported,
175 delegateData.m_Backends,
176 isSupported,
177 setBackend,
178 inputTensorInfo,
179 outputTensorInfo,
180 descriptor);
181
182 return isSupported ? kTfLiteOk : kTfLiteError;
183 }
184
Mike Kellya2806502023-08-03 10:42:11 +0100185 auto layerName = GetName(armnn::LayerType::Pad, nodeIndex);
186 armnn::IConnectableLayer* padLayer = delegateData.m_Network->AddPadLayer(descriptor, layerName.c_str());
Teresa Charlinecebb0f2023-04-27 21:37:56 +0100187 padLayer->SetBackendId(setBackend);
188 ARMNN_ASSERT(padLayer != nullptr);
189
190 armnn::IOutputSlot& outputSlot = padLayer->GetOutputSlot(0);
191 outputSlot.SetTensorInfo(outputTensorInfo);
192
193 return Connect(padLayer, tfLiteContext, tfLiteNode, delegateData);
194}
195
196} // namespace armnnOpaqueDelegate