//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>
#include <SharedFunctions.hpp>

#include <flatbuffers/flexbuffers.h>

namespace armnnOpaqueDelegate
{

TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
                                    TfLiteOpaqueContext* tfLiteContext,
                                    TfLiteOpaqueNode* tfLiteNode,
                                    int nodeIndex,
                                    int32_t tfLitePoolingOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLitePoolingOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLitePoolingOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

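    // The pooling parameters (filter size, strides, padding scheme and fused activation)
    // come from the node's builtin data, interpreted as TfLitePoolParams.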
    auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    if (!tfLiteNodeParameters)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to read TfLitePoolParams builtin data from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
                                                                    tfLiteContext,
                                                                    outputTensorInfo,
                                                                    outputTensorInfo,
                                                                    activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    armnn::PoolingAlgorithm poolingAlgorithm;
    switch (tfLitePoolingOperatorCode)
    {
        case kTfLiteBuiltinAveragePool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::Average;
            break;
        case kTfLiteBuiltinL2Pool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::L2;
            break;
        case kTfLiteBuiltinMaxPool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::Max;
            break;
        default:
            return kTfLiteError;
    }

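    // Create the armnn pool2d descriptor and set the algorithm determined above.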
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolingAlgorithm;

    descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width;
    descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height;
    descriptor.m_StrideX = tfLiteNodeParameters->stride_width;
    descriptor.m_StrideY = tfLiteNodeParameters->stride_height;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

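    // The input is NHWC, so dimension 1 is Height and dimension 2 is Width.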
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

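    // Compute the explicit padding for each spatial dimension from the TfLite padding scheme (SAME or VALID).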
    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("POOLING_2D",
                                          tfLiteContext,
                                          IsPooling2dSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor);
    };

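    // If there is no network to build, this call is only checking support, so validate and return.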
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Pooling2d, nodeIndex);
    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(poolingLayer != nullptr);
    poolingLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any.
    if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check and create activation.
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}

TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
                                    TfLiteOpaqueContext* tfLiteContext,
                                    TfLiteOpaqueNode* tfLiteNode,
                                    int nodeIndex,
                                    std::string customOperatorName)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, kTfLiteBuiltinCustom, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, kTfLiteBuiltinCustom, nodeIndex))
    {
        return kTfLiteError;
    }

    // Set the input and output info.
    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    // Custom operators are identified by the name string associated with the operator. Use it to determine
    // which pooling algorithm to create the armnn operator with. L2 Pooling3D is unsupported in TfLite.
    armnn::PoolingAlgorithm poolingAlgorithm;
    if (customOperatorName == "MaxPool3D")
    {
        poolingAlgorithm = armnn::PoolingAlgorithm::Max;
    }
    else if (customOperatorName == "AveragePool3D")
    {
        poolingAlgorithm = armnn::PoolingAlgorithm::Average;
    }
    else
    {
        return kTfLiteError;
    }

    // Create the armnn pool3d descriptor and set the algorithm parsed above.
    armnn::Pooling3dDescriptor descriptor;
    descriptor.m_PoolType = poolingAlgorithm;

    // The operator's custom options are stored as a FlexBuffers-encoded buffer in the node's custom
    // initial data, which is accessed through TfLiteOpaqueNodeGetCustomInitialData.
    const void* customData = nullptr;
    int customDataSize = 0;
    if (TfLiteOpaqueNodeGetCustomInitialData(tfLiteNode, &customData, &customDataSize) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to initialise initial custom data from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    // Reinterpret the void* to a byte buffer to access the options data in the FlexBuffers map.
    const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(customData),
                                                     customDataSize).AsMap();

    // poolDims is a vector of [ 1, Depth, Height, Width, 1 ].
    const auto poolDims = m["ksize"].AsTypedVector();
    descriptor.m_PoolWidth  = poolDims[3].AsInt32();
    descriptor.m_PoolHeight = poolDims[2].AsInt32();
    descriptor.m_PoolDepth  = poolDims[1].AsInt32();

    // strideDims is a vector of [ 1, Z, Y, X, 1 ].
    const auto strideDims = m["strides"].AsTypedVector();
    descriptor.m_StrideX = strideDims[3].AsInt32();
    descriptor.m_StrideY = strideDims[2].AsInt32();
    descriptor.m_StrideZ = strideDims[1].AsInt32();
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;

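    // The input is NDHWC, so dimensions 1, 2 and 3 are Depth, Height and Width respectively.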
    unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // CalcPadding expects a TfLitePadding type. Parse the FlexBuffers map to extract the padding string
    // and create the TfLitePadding value from it.
    std::string paddingStr = m["padding"].AsString().str();
    TfLitePadding padding;
    if (paddingStr == "VALID")
    {
        padding = kTfLitePaddingValid;
    }
    else if (paddingStr == "SAME")
    {
        padding = kTfLitePaddingSame;
    }
    else
    {
        padding = kTfLitePaddingUnknown;
    }

    // Calculate the padding for each pooling dimension separately.
    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
                descriptor.m_PadTop, descriptor.m_PadBottom, padding);
    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
                descriptor.m_PadLeft, descriptor.m_PadRight, padding);
    CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
                descriptor.m_PadFront, descriptor.m_PadBack, padding);

    // Check the activation by parsing the string from the FlexBuffers map.
    std::string activationTypeStr = m["activation"].AsString().str();
    TfLiteFusedActivation activationType = kTfLiteActNone;

    if (activationTypeStr == "kTfLiteActRelu")
    {
        activationType = kTfLiteActRelu;
    }
    else if (activationTypeStr == "kTfLiteActReluN1To1")
    {
        activationType = kTfLiteActReluN1To1;
    }
    else if (activationTypeStr == "kTfLiteActRelu6")
    {
        activationType = kTfLiteActRelu6;
    }
    else if (activationTypeStr == "kTfLiteActTanh")
    {
        activationType = kTfLiteActTanh;
    }
    else if (activationTypeStr == "kTfLiteActSignBit")
    {
        activationType = kTfLiteActSignBit;
    }
    else if (activationTypeStr == "kTfLiteActSigmoid")
    {
        activationType = kTfLiteActSigmoid;
    }
    else
    {
        activationType = kTfLiteActNone;
    }

    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
                                                                    tfLiteContext,
                                                                    outputTensorInfo,
                                                                    outputTensorInfo,
                                                                    activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Validate the output info.
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("POOLING_3D",
                                          tfLiteContext,
                                          IsPooling3dSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Create the Layer.
    auto layerName = GetName(armnn::LayerType::Pooling3d, nodeIndex);
    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(poolingLayer != nullptr);
    poolingLayer->SetBackendId(setBackend);

    // Create and set the output slot.
    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any.
    if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (Connect(poolingLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex);
}

} // namespace armnnOpaqueDelegate