//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>
#include <SharedFunctions.hpp>

#include <flatbuffers/flexbuffers.h>

namespace armnnOpaqueDelegate
{

16TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
17 TfLiteOpaqueContext* tfLiteContext,
18 TfLiteOpaqueNode* tfLiteNode,
19 int nodeIndex,
20 int32_t tfLitePoolingOperatorCode)
21{
22 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
23 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
24
25 // Gather input indices and use to get input tensors.
26 int numInputs = 0;
27 const int* inputTensors;
28 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
29 {
30 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
31 tfLiteContext,
32 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
33 nodeIndex);
34 return kTfLiteError;
35 }
36
37 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
38 if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLitePoolingOperatorCode, nodeIndex))
39 {
40 return kTfLiteError;
41 }
42
43 // Gather output indices and use to get output tensors.
44 int numOutputs = 0;
45 const int* outputTensors;
46 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
47 {
48 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
49 tfLiteContext,
50 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
51 nodeIndex);
52 return kTfLiteError;
53 }
54
55 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
56 if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePoolingOperatorCode, nodeIndex))
57 {
58 return kTfLiteError;
59 }
60
61 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
62 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
63
64 auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
65 TfLiteFusedActivation activationType = kTfLiteActNone;
66 if (tfLiteNodeParameters)
67 {
68 activationType = tfLiteNodeParameters->activation;
69 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
70 tfLiteContext,
71 outputTensorInfo,
72 outputTensorInfo,
73 activationType);
74 if(activationStatus != kTfLiteOk)
75 {
76 return kTfLiteError;
77 }
78 }
79
80 armnn::PoolingAlgorithm poolingAlgorithm;
81 switch(tfLitePoolingOperatorCode)
82 {
83 case kTfLiteBuiltinAveragePool2d:
84 poolingAlgorithm = armnn::PoolingAlgorithm::Average;
85 break;
86 case kTfLiteBuiltinL2Pool2d:
87 poolingAlgorithm = armnn::PoolingAlgorithm::L2;
88 break;
89 case kTfLiteBuiltinMaxPool2d:
90 poolingAlgorithm = armnn::PoolingAlgorithm::Max;
91 break;
92 default:
93 return kTfLiteError;
94 }
95
96 armnn::Pooling2dDescriptor descriptor;
97 descriptor.m_PoolType = poolingAlgorithm;
98
99 descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
100 descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
101 descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
102 descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
103 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
104
105 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
106 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
107
108 CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
109 descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
110 CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
111 descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
112
113 bool isSupported = false;
114 armnn::BackendId setBackend;
115 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
116 {
117 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("POOLING_2D",
118 tfLiteContext,
119 IsPooling2dSupported,
120 delegateData.m_Backends,
121 isSupported,
122 setBackend,
123 inputTensorInfo,
124 outputTensorInfo,
125 descriptor);
126 };
127
128 if (!delegateData.m_Network)
129 {
130 validateFunc(outputTensorInfo, isSupported);
131 return isSupported ? kTfLiteOk : kTfLiteError;
132 }
133
134 armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
135 poolingLayer->SetBackendId(setBackend);
136 ARMNN_ASSERT(poolingLayer != nullptr);
137
138 armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
139 outputSlot.SetTensorInfo(outputTensorInfo);
140
141 // try to connect the Constant Inputs if there are any
142 if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
143 {
144 return kTfLiteError;
145 }
146
147 if(Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
148 {
149 return kTfLiteError;
150 }
151
152 // Check and create activation
153 return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
154}
155
156TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
157 TfLiteOpaqueContext* tfLiteContext,
158 TfLiteOpaqueNode* tfLiteNode,
159 int nodeIndex,
160 std::string customOperatorName)
161{
162 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
163 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
164
165 // Gather input indices and use to get input tensors.
166 int numInputs = 0;
167 const int* inputTensors;
168 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
169 {
170 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
171 tfLiteContext,
172 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
173 nodeIndex);
174 return kTfLiteError;
175 }
176
177 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
178 if (!IsValid(tfLiteContext, tfLiteInputTensor, kTfLiteBuiltinCustom, nodeIndex))
179 {
180 return kTfLiteError;
181 }
182
183 // Gather output indices and use to get output tensors.
184 int numOutputs = 0;
185 const int* outputTensors;
186 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
187 {
188 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
189 tfLiteContext,
190 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
191 nodeIndex);
192 return kTfLiteError;
193 }
194
195 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
196 if (!IsValid(tfLiteContext, tfLiteOutputTensor, kTfLiteBuiltinCustom, nodeIndex))
197 {
198 return kTfLiteError;
199 }
200
201 // Set the input and output info
202 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
203 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
204
205 // Custom Operators are defined by the name string associated to the operator. Use this to determine
206 // which pooling algorithm to create the armnn operator with. L2 Pooling3D is unsupported in TfLite.
207 armnn::PoolingAlgorithm poolingAlgorithm;
208 if (customOperatorName == "MaxPool3D")
209 {
210 poolingAlgorithm = armnn::PoolingAlgorithm::Max;
211 }
212 else if (customOperatorName == "AveragePool3D")
213 {
214 poolingAlgorithm = armnn::PoolingAlgorithm::Average;
215 }
216 else
217 {
218 return kTfLiteError;
219 }
220 // Create the armnn pool3d descriptor and set the algorithm parsed above.
221 armnn::Pooling3dDescriptor descriptor;
222 descriptor.m_PoolType = poolingAlgorithm;
223
224 // custom_initial_data and custom_initial_data_size are void* variables defined in the tflite registration
225 // used to access the custom option buffer for the operator.
226 const void* customData = nullptr;
227 int customDataSize = 0;
228 if (TfLiteOpaqueNodeGetCustomInitialData(tfLiteNode, &customData, &customDataSize) != kTfLiteOk)
229 {
230 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
231 tfLiteContext,
232 "TfLiteArmnnOpaqueDelegate: Unable to initialise initial custom data from node #%d: ",
233 nodeIndex);
234 return kTfLiteError;
235 }
236
237 // Reinterpret the void* to a byte buffer to access the options data in the flexbuffers map.
238 const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(customData),
239 customDataSize).AsMap();
240 // poolDims is a vector of [ 1, Depth, Height, Width, 1 ]
241 const auto poolDims = m["ksize"].AsTypedVector();
242 descriptor.m_PoolWidth = poolDims[3].AsInt32();
243 descriptor.m_PoolHeight = poolDims[2].AsInt32();
244 descriptor.m_PoolDepth = poolDims[1].AsInt32();
245
246 // strideDimes is a vector of [ 1, Z, Y, X, 1]
247 const auto strideDims = m["strides"].AsTypedVector();
248 descriptor.m_StrideX = strideDims[3].AsInt32();
249 descriptor.m_StrideY = strideDims[2].AsInt32();
250 descriptor.m_StrideZ = strideDims[1].AsInt32();
251 descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
252
253 unsigned int inputDepth = inputTensorInfo.GetShape()[1];
254 unsigned int inputHeight = inputTensorInfo.GetShape()[2];
255 unsigned int inputWidth = inputTensorInfo.GetShape()[3];
256
257 // CalcPadding expects a TfLitePadding type. Parse flexbuffers to extract padding string and create TfLitePadding.
258 std::string paddingStr = m["padding"].AsString().str();
259 TfLitePadding padding;
260 if (paddingStr == "VALID")
261 {
262 padding = kTfLitePaddingValid;
263 }
264 else if (paddingStr == "SAME")
265 {
266 padding = kTfLitePaddingSame;
267 }
268 else
269 {
270 padding = kTfLitePaddingUnknown;
271 }
272 // Calculates padding for each pooling dimension separately
273 CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
274 descriptor.m_PadTop, descriptor.m_PadBottom, padding);
275 CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
276 descriptor.m_PadLeft, descriptor.m_PadRight, padding);
277 CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
278 descriptor.m_PadFront, descriptor.m_PadBack, padding);
279
280
281 // Check activation by parsing the string from the flexbuffer map
282 std::string activationTypeStr = m["activation"].AsString().str();
283 TfLiteFusedActivation activationType = kTfLiteActNone;
284
285 if (activationTypeStr == "kTfLiteActRelu")
286 {
287 activationType = kTfLiteActRelu;
288 }
289 else if (activationTypeStr == "kTfLiteActReluN1To1")
290 {
291 activationType = kTfLiteActReluN1To1;
292 }
293 else if (activationTypeStr == "kTfLiteActRelu6")
294 {
295 activationType = kTfLiteActRelu6;
296 }
297 else if (activationTypeStr == "kTfLiteActTanh")
298 {
299 activationType = kTfLiteActTanh;
300 }
301 else if (activationTypeStr == "kTfLiteActSignBit")
302 {
303 activationType = kTfLiteActSignBit;
304 }
305 else if (activationTypeStr == "kTfLiteActSigmoid")
306 {
307 activationType = kTfLiteActSigmoid;
308 }
309 else
310 {
311 activationType = kTfLiteActNone;
312 }
313
314 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
315 tfLiteContext,
316 outputTensorInfo,
317 outputTensorInfo,
318 activationType);
319 if(activationStatus != kTfLiteOk)
320 {
321 return kTfLiteError;
322 }
323
324 // Validate the output info.
325 bool isSupported = false;
326 armnn::BackendId setBackend;
327 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
328 {
329 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("POOLING_3D",
330 tfLiteContext,
331 IsPooling3dSupported,
332 delegateData.m_Backends,
333 isSupported,
334 setBackend,
335 inputTensorInfo,
336 outputTensorInfo,
337 descriptor);
338 };
339
340 if (!delegateData.m_Network)
341 {
342 validateFunc(outputTensorInfo, isSupported);
343 return isSupported ? kTfLiteOk : kTfLiteError;
344 }
345
346 // Create the Layer
347 armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
348 poolingLayer->SetBackendId(setBackend);
349 ARMNN_ASSERT(poolingLayer != nullptr);
350
351 // Create and set output slots
352 armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
353 outputSlot.SetTensorInfo(outputTensorInfo);
354
355 // try to connect the Constant Inputs if there are any
356 if(ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
357 {
358 return kTfLiteError;
359 }
360
361 if(Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
362 {
363 return kTfLiteError;
364 }
365
366 return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
367}

} // namespace armnnOpaqueDelegate