//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <flatbuffers/flexbuffers.h>

namespace armnnDelegate
{

TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
                                    TfLiteContext* tfLiteContext,
                                    TfLiteNode* tfLiteNode,
                                    int nodeIndex,
                                    int32_t tfLitePoolingOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLitePoolingOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLitePoolingOperatorCode, nodeIndex);
        return kTfLiteError;
    }

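    // Collect the Arm NN tensor info for the input and output tensors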
    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

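    // Map the TfLite builtin operator code onto the corresponding Arm NN pooling algorithm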
    armnn::PoolingAlgorithm poolingAlgorithm;
    switch(tfLitePoolingOperatorCode)
    {
        case kTfLiteBuiltinAveragePool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::Average;
            break;
        case kTfLiteBuiltinL2Pool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::L2;
            break;
        case kTfLiteBuiltinMaxPool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::Max;
            break;
        default:
            return kTfLiteError;
    }

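    // Create the Arm NN pooling descriptor and populate it from the builtin TfLitePoolParams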
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolingAlgorithm;

    auto* params = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
    descriptor.m_PoolWidth = params->filter_width;
    descriptor.m_PoolHeight = params->filter_height;
    descriptor.m_StrideX = params->stride_width;
    descriptor.m_StrideY = params->stride_height;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

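    // Calculate the padding for the height and width dimensions from the NHWC input shape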
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

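    // Check whether the backends support this pooling configuration; when there is no network yet,
    // this is a validation-only pass and only the result is returned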
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
                                   tfLiteContext,
                                   IsPooling2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

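    // Add the pooling layer to the network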
    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
    ARMNN_ASSERT(poolingLayer != nullptr);
    poolingLayer->SetBackendId(setBackend);

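    // Set the output tensor info on the layer's output slot and connect the layer into the graph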
    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    Connect(poolingLayer, tfLiteNode, delegateData);

    // Check activation
    TfLiteFusedActivation activationType = params->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}

TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
                                    TfLiteContext* tfLiteContext,
                                    TfLiteNode* tfLiteNode,
                                    int nodeIndex,
                                    std::string customOperatorName)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator %s node #%d: ",
            customOperatorName.c_str(), nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator %s node #%d: ",
            customOperatorName.c_str(), nodeIndex);
        return kTfLiteError;
    }
    // Set the input and output info
    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Custom operators are identified by the name string associated with the operator. Use this to determine
    // which pooling algorithm to create the Arm NN operator with. L2 Pooling3D is unsupported in TfLite.
    armnn::PoolingAlgorithm poolingAlgorithm;
    if (customOperatorName == "MaxPool3D")
    {
        poolingAlgorithm = armnn::PoolingAlgorithm::Max;
    }
    else if (customOperatorName == "AveragePool3D")
    {
        poolingAlgorithm = armnn::PoolingAlgorithm::Average;
    }
    else
    {
        return kTfLiteError;
    }
    // Create the Arm NN Pooling3d descriptor and set the algorithm determined above.
    armnn::Pooling3dDescriptor descriptor;
    descriptor.m_PoolType = poolingAlgorithm;

    // custom_initial_data (a void* buffer) and custom_initial_data_size are defined in the TfLite registration
    // and give access to the custom options attached to this operator.
    auto custom_data = tfLiteNode->custom_initial_data;
    auto custom_data_size = tfLiteNode->custom_initial_data_size;
    // Reinterpret the void* as a byte buffer to access the options data in the flexbuffers map.
    const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(custom_data),
                                                     custom_data_size).AsMap();
    // poolDims is a vector of [ 1, Depth, Height, Width, 1 ]
    const auto poolDims = m["ksize"].AsTypedVector();
    descriptor.m_PoolWidth = poolDims[3].AsInt32();
    descriptor.m_PoolHeight = poolDims[2].AsInt32();
    descriptor.m_PoolDepth = poolDims[1].AsInt32();

    // strideDims is a vector of [ 1, Z, Y, X, 1 ]
    const auto strideDims = m["strides"].AsTypedVector();
    descriptor.m_StrideX = strideDims[3].AsInt32();
    descriptor.m_StrideY = strideDims[2].AsInt32();
    descriptor.m_StrideZ = strideDims[1].AsInt32();
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;

191
192 unsigned int inputDepth = inputTensorInfo.GetShape()[1];
193 unsigned int inputHeight = inputTensorInfo.GetShape()[2];
194 unsigned int inputWidth = inputTensorInfo.GetShape()[3];
195
196 // CalcPadding expects a TfLitePadding type. Parse flexbuffers to extract padding string and create TfLitePadding.
197 std::string paddingStr = m["padding"].AsString().str();
198 TfLitePadding padding;
199 if (paddingStr == "VALID")
200 {
201 padding = kTfLitePaddingValid;
202 }
203 else if (paddingStr == "SAME")
204 {
205 padding = kTfLitePaddingSame;
206 }
207 else
208 {
209 padding = kTfLitePaddingUnknown;
210 }
211 // Calculates padding for each pooling dimension separately
212 CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
213 descriptor.m_PadTop, descriptor.m_PadBottom, padding);
214 CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
215 descriptor.m_PadLeft, descriptor.m_PadRight, padding);
216 CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
217 descriptor.m_PadFront, descriptor.m_PadBack, padding);
218
    // Validate the output info.
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
                                   tfLiteContext,
                                   IsPooling3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Create the Layer
    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
    ARMNN_ASSERT(poolingLayer != nullptr);
    poolingLayer->SetBackendId(setBackend);

    // Create and set output slots
    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    Connect(poolingLayer, tfLiteNode, delegateData);

    // Check activation by parsing the string from the flexbuffer map
    std::string activationTypeStr = m["activation"].AsString().str();
    TfLiteFusedActivation activationType;

    if (activationTypeStr == "kTfLiteActRelu")
    {
        activationType = kTfLiteActRelu;
    }
    else if (activationTypeStr == "kTfLiteActReluN1To1")
    {
        activationType = kTfLiteActReluN1To1;
    }
    else if (activationTypeStr == "kTfLiteActRelu6")
    {
        activationType = kTfLiteActRelu6;
    }
    else if (activationTypeStr == "kTfLiteActTanh")
    {
        activationType = kTfLiteActTanh;
    }
    else if (activationTypeStr == "kTfLiteActSignBit")
    {
        activationType = kTfLiteActSignBit;
    }
    else if (activationTypeStr == "kTfLiteActSigmoid")
    {
        activationType = kTfLiteActSigmoid;
    }
    else
    {
        activationType = kTfLiteActNone;
    }

    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}

} // namespace armnnDelegate