//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <flatbuffers/flexbuffers.h>

namespace armnnDelegate
{

TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData,
                                    TfLiteContext* tfLiteContext,
                                    TfLiteNode* tfLiteNode,
                                    int nodeIndex,
                                    int32_t tfLitePoolingOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

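    // The delegate can only handle tensors whose shapes are fully known up front,
    // so reject any dynamic input or output tensor.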
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLitePoolingOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLitePoolingOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

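    // Map the TfLite builtin operator code onto the matching Arm NN pooling algorithm.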
    armnn::PoolingAlgorithm poolingAlgorithm;
    switch(tfLitePoolingOperatorCode)
    {
        case kTfLiteBuiltinAveragePool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::Average;
            break;
        case kTfLiteBuiltinL2Pool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::L2;
            break;
        case kTfLiteBuiltinMaxPool2d:
            poolingAlgorithm = armnn::PoolingAlgorithm::Max;
            break;
        default:
            return kTfLiteError;
    }

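    // Populate the pooling descriptor from the builtin TfLitePoolParams.
    // TfLite tensors are laid out as NHWC, so the descriptor uses that data layout.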
    armnn::Pooling2dDescriptor descriptor;
    descriptor.m_PoolType = poolingAlgorithm;

    auto* params = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data);
    descriptor.m_PoolWidth = params->filter_width;
    descriptor.m_PoolHeight = params->filter_height;
    descriptor.m_StrideX = params->stride_width;
    descriptor.m_StrideY = params->stride_height;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

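    // TfLite only records a padding scheme (SAME/VALID); derive explicit per-edge
    // padding values from the input dimensions (NHWC: shape[1] = H, shape[2] = W).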
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D",
                                   tfLiteContext,
                                   IsPooling2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor);
    };

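    // A null network means the delegate is only being asked whether this node is
    // supported; run the backend validation and return without adding a layer.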
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

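    // Add the pooling layer to the Arm NN network and connect up its input and output slots.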
    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor);
    ARMNN_ASSERT(poolingLayer != nullptr);

    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    Connect(poolingLayer, tfLiteNode, delegateData);

    // Check activation
    TfLiteFusedActivation activationType = params->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}

TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData,
                                    TfLiteContext* tfLiteContext,
                                    TfLiteNode* tfLiteNode,
                                    int nodeIndex,
                                    std::string customOperatorName)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator %s node #%d: ",
            customOperatorName.c_str(), nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator %s node #%d: ",
            customOperatorName.c_str(), nodeIndex);
        return kTfLiteError;
    }
    // Set the input and output info
    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Custom operators are identified by the name string associated with the operator.
    // Use it to determine which pooling algorithm to create the Arm NN operator with.
    // L2 Pooling3D is unsupported in TfLite.
    armnn::PoolingAlgorithm poolingAlgorithm;
    if (customOperatorName == "MaxPool3D")
    {
        poolingAlgorithm = armnn::PoolingAlgorithm::Max;
    }
    else if (customOperatorName == "AveragePool3D")
    {
        poolingAlgorithm = armnn::PoolingAlgorithm::Average;
    }
    else
    {
        return kTfLiteError;
    }
    // Create the armnn pool3d descriptor and set the algorithm parsed above.
    armnn::Pooling3dDescriptor descriptor;
    descriptor.m_PoolType = poolingAlgorithm;

    // custom_initial_data and custom_initial_data_size are fields on TfLiteNode that
    // expose the custom option buffer serialised for this operator.
    auto custom_data = tfLiteNode->custom_initial_data;
    auto custom_data_size = tfLiteNode->custom_initial_data_size;
    // Reinterpret the void* as a byte buffer to access the options data in the flexbuffers map.
    const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(custom_data),
                                                     custom_data_size).AsMap();
    // poolDims is a vector of [ 1, Depth, Height, Width, 1 ]
    const auto poolDims = m["ksize"].AsTypedVector();
    descriptor.m_PoolWidth = poolDims[3].AsInt32();
    descriptor.m_PoolHeight = poolDims[2].AsInt32();
    descriptor.m_PoolDepth = poolDims[1].AsInt32();

    // strideDims is a vector of [ 1, Z, Y, X, 1 ]
    const auto strideDims = m["strides"].AsTypedVector();
    descriptor.m_StrideX = strideDims[3].AsInt32();
    descriptor.m_StrideY = strideDims[2].AsInt32();
    descriptor.m_StrideZ = strideDims[1].AsInt32();
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;

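    // NDHWC layout: shape[1] = depth, shape[2] = height, shape[3] = width.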
    unsigned int inputDepth = inputTensorInfo.GetShape()[1];
    unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    unsigned int inputWidth = inputTensorInfo.GetShape()[3];

    // CalcPadding expects a TfLitePadding type. Parse the flexbuffers map to extract
    // the padding string and convert it to the corresponding TfLitePadding value.
    std::string paddingStr = m["padding"].AsString().str();
    TfLitePadding padding;
    if (paddingStr == "VALID")
    {
        padding = kTfLitePaddingValid;
    }
    else if (paddingStr == "SAME")
    {
        padding = kTfLitePaddingSame;
    }
    else
    {
        padding = kTfLitePaddingUnknown;
    }
    // Calculates padding for each pooling dimension separately
    CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u,
                descriptor.m_PadTop, descriptor.m_PadBottom, padding);
    CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u,
                descriptor.m_PadLeft, descriptor.m_PadRight, padding);
    CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u,
                descriptor.m_PadFront, descriptor.m_PadBack, padding);

    // Validate the output info.
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D",
                                   tfLiteContext,
                                   IsPooling3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor);
    };

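    // As in the 2D case, a null network means this is a support-check-only call.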
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Create the Layer
    armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor);
    ARMNN_ASSERT(poolingLayer != nullptr);

    // Create and set output slots
    armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    Connect(poolingLayer, tfLiteNode, delegateData);

    // Check activation by parsing the string from the flexbuffer map
    std::string activationTypeStr = m["activation"].AsString().str();
    TfLiteFusedActivation activationType;

    if (activationTypeStr == "kTfLiteActRelu")
    {
        activationType = kTfLiteActRelu;
    }
    else if (activationTypeStr == "kTfLiteActReluN1To1")
    {
        activationType = kTfLiteActReluN1To1;
    }
    else if (activationTypeStr == "kTfLiteActRelu6")
    {
        activationType = kTfLiteActRelu6;
    }
    else if (activationTypeStr == "kTfLiteActTanh")
    {
        activationType = kTfLiteActTanh;
    }
    else if (activationTypeStr == "kTfLiteActSignBit")
    {
        activationType = kTfLiteActSignBit;
    }
    else if (activationTypeStr == "kTfLiteActSigmoid")
    {
        activationType = kTfLiteActSigmoid;
    }
    else
    {
        activationType = kTfLiteActNone;
    }

    return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData);
}

} // namespace armnnDelegate