//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <ClassicDelegateUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <flatbuffers/flexbuffers.h>
Sadik Armagan | 62483be | 2020-10-23 17:14:43 +0100 | [diff] [blame] | 15 | |
| 16 | namespace armnnDelegate |
| 17 | { |
| 18 | |
Ryan OShea | d21abaf | 2022-06-10 14:49:11 +0100 | [diff] [blame] | 19 | TfLiteStatus VisitPooling2dOperator(DelegateData& delegateData, |
| 20 | TfLiteContext* tfLiteContext, |
| 21 | TfLiteNode* tfLiteNode, |
| 22 | int nodeIndex, |
| 23 | int32_t tfLitePoolingOperatorCode) |
Sadik Armagan | 62483be | 2020-10-23 17:14:43 +0100 | [diff] [blame] | 24 | { |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 25 | TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| 26 | TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| 27 | |
| 28 | const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; |
| 29 | const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; |
| 30 | if (IsDynamicTensor(tfLiteInputTensor)) |
| 31 | { |
| 32 | TF_LITE_MAYBE_KERNEL_LOG( |
| 33 | tfLiteContext, |
| 34 | "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ", |
| 35 | tfLitePoolingOperatorCode, nodeIndex); |
| 36 | return kTfLiteError; |
| 37 | } |
| 38 | |
| 39 | const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; |
| 40 | if (IsDynamicTensor(tfLiteOutputTensor)) |
| 41 | { |
| 42 | TF_LITE_MAYBE_KERNEL_LOG( |
| 43 | tfLiteContext, |
| 44 | "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ", |
| 45 | tfLitePoolingOperatorCode, nodeIndex); |
| 46 | return kTfLiteError; |
| 47 | } |
| 48 | |
| 49 | const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); |
Sadik Armagan | 90a119b | 2022-08-05 16:12:49 +0100 | [diff] [blame] | 50 | const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 51 | |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 52 | auto* tfLiteNodeParameters = reinterpret_cast<TfLitePoolParams*>(tfLiteNode->builtin_data); |
Ryan OShea | 475c7a8 | 2023-01-30 14:24:15 +0000 | [diff] [blame] | 53 | TfLiteFusedActivation activationType = kTfLiteActNone; |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 54 | if (tfLiteNodeParameters) |
| 55 | { |
| 56 | activationType = tfLiteNodeParameters->activation; |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 57 | TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, |
| 58 | outputTensorInfo, activationType); |
| 59 | if(activationStatus != kTfLiteOk) |
| 60 | { |
| 61 | return kTfLiteError; |
| 62 | } |
| 63 | |
| 64 | } |
| 65 | |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 66 | armnn::PoolingAlgorithm poolingAlgorithm; |
| 67 | switch(tfLitePoolingOperatorCode) |
| 68 | { |
Narumol Prangnawarat | 8081536 | 2020-11-11 11:33:03 +0000 | [diff] [blame] | 69 | case kTfLiteBuiltinAveragePool2d: |
| 70 | poolingAlgorithm = armnn::PoolingAlgorithm::Average; |
| 71 | break; |
| 72 | case kTfLiteBuiltinL2Pool2d: |
| 73 | poolingAlgorithm = armnn::PoolingAlgorithm::L2; |
| 74 | break; |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 75 | case kTfLiteBuiltinMaxPool2d: |
| 76 | poolingAlgorithm = armnn::PoolingAlgorithm::Max; |
| 77 | break; |
| 78 | default: |
| 79 | return kTfLiteError; |
| 80 | } |
| 81 | |
| 82 | armnn::Pooling2dDescriptor descriptor; |
| 83 | descriptor.m_PoolType = poolingAlgorithm; |
| 84 | |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 85 | descriptor.m_PoolWidth = tfLiteNodeParameters->filter_width; |
| 86 | descriptor.m_PoolHeight = tfLiteNodeParameters->filter_height; |
| 87 | descriptor.m_StrideX = tfLiteNodeParameters->stride_width; |
| 88 | descriptor.m_StrideY = tfLiteNodeParameters->stride_height; |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 89 | descriptor.m_DataLayout = armnn::DataLayout::NHWC; |
| 90 | |
| 91 | unsigned int inputHeight = inputTensorInfo.GetShape()[1]; |
| 92 | unsigned int inputWidth = inputTensorInfo.GetShape()[2]; |
| 93 | |
| 94 | CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u, |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 95 | descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding); |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 96 | CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u, |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 97 | descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding); |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 98 | |
| 99 | bool isSupported = false; |
Cathal Corbett | 5383767 | 2022-09-01 11:34:37 +0100 | [diff] [blame] | 100 | armnn::BackendId setBackend; |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 101 | auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) |
| 102 | { |
Sadik Armagan | bfa767c | 2022-02-09 14:58:03 +0000 | [diff] [blame] | 103 | FORWARD_LAYER_SUPPORT_FUNC("POOLING_2D", |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 104 | tfLiteContext, |
| 105 | IsPooling2dSupported, |
| 106 | delegateData.m_Backends, |
| 107 | isSupported, |
Cathal Corbett | 5383767 | 2022-09-01 11:34:37 +0100 | [diff] [blame] | 108 | setBackend, |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 109 | inputTensorInfo, |
| 110 | outputTensorInfo, |
| 111 | descriptor); |
| 112 | }; |
| 113 | |
| 114 | if (!delegateData.m_Network) |
| 115 | { |
| 116 | validateFunc(outputTensorInfo, isSupported); |
| 117 | return isSupported ? kTfLiteOk : kTfLiteError; |
| 118 | } |
| 119 | |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 120 | auto layerName = GetLayerName(armnn::LayerType::Pooling2d, nodeIndex); |
| 121 | armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling2dLayer(descriptor, layerName.c_str()); |
Cathal Corbett | 5383767 | 2022-09-01 11:34:37 +0100 | [diff] [blame] | 122 | poolingLayer->SetBackendId(setBackend); |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 123 | ARMNN_ASSERT(poolingLayer != nullptr); |
| 124 | |
| 125 | armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0); |
| 126 | outputSlot.SetTensorInfo(outputTensorInfo); |
Ryan OShea | 4c231de | 2023-01-17 15:19:20 +0000 | [diff] [blame] | 127 | |
| 128 | // try to connect the Constant Inputs if there are any |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 129 | if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk) |
Ryan OShea | 4c231de | 2023-01-17 15:19:20 +0000 | [diff] [blame] | 130 | { |
| 131 | return kTfLiteError; |
| 132 | } |
| 133 | |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 134 | if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk) |
Ryan OShea | 4c231de | 2023-01-17 15:19:20 +0000 | [diff] [blame] | 135 | { |
| 136 | return kTfLiteError; |
| 137 | } |
Narumol Prangnawarat | 50c87d3 | 2020-11-09 18:42:11 +0000 | [diff] [blame] | 138 | |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 139 | // Check and create activation |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 140 | return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex); |
Sadik Armagan | 62483be | 2020-10-23 17:14:43 +0100 | [diff] [blame] | 141 | } |
| 142 | |
Ryan OShea | d21abaf | 2022-06-10 14:49:11 +0100 | [diff] [blame] | 143 | TfLiteStatus VisitPooling3dOperator(DelegateData& delegateData, |
| 144 | TfLiteContext* tfLiteContext, |
| 145 | TfLiteNode* tfLiteNode, |
| 146 | int nodeIndex, |
| 147 | std::string customOperatorName) |
| 148 | { |
| 149 | TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| 150 | TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| 151 | |
| 152 | const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; |
| 153 | const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; |
| 154 | if (IsDynamicTensor(tfLiteInputTensor)) |
| 155 | { |
| 156 | TF_LITE_MAYBE_KERNEL_LOG( |
| 157 | tfLiteContext, |
| 158 | "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ", |
| 159 | customOperatorName.c_str(), nodeIndex); |
| 160 | return kTfLiteError; |
| 161 | } |
| 162 | |
| 163 | const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; |
| 164 | if (IsDynamicTensor(tfLiteOutputTensor)) |
| 165 | { |
| 166 | TF_LITE_MAYBE_KERNEL_LOG( |
| 167 | tfLiteContext, |
| 168 | "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ", |
| 169 | customOperatorName.c_str(), nodeIndex); |
| 170 | return kTfLiteError; |
| 171 | } |
| 172 | // Set the input and output info |
| 173 | const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); |
Sadik Armagan | 90a119b | 2022-08-05 16:12:49 +0100 | [diff] [blame] | 174 | const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true); |
Ryan OShea | d21abaf | 2022-06-10 14:49:11 +0100 | [diff] [blame] | 175 | |
| 176 | // Custom Operators are defined by the name string associated to the operator. Use this to determine |
| 177 | // which pooling algorithm to create the armnn operator with. L2 Pooling3D is unsupported in TfLite. |
| 178 | armnn::PoolingAlgorithm poolingAlgorithm; |
| 179 | if (customOperatorName == "MaxPool3D") |
| 180 | { |
| 181 | poolingAlgorithm = armnn::PoolingAlgorithm::Max; |
| 182 | } |
| 183 | else if (customOperatorName == "AveragePool3D") |
| 184 | { |
| 185 | poolingAlgorithm = armnn::PoolingAlgorithm::Average; |
| 186 | } |
| 187 | else |
| 188 | { |
| 189 | return kTfLiteError; |
| 190 | } |
| 191 | // Create the armnn pool3d descriptor and set the algorithm parsed above. |
| 192 | armnn::Pooling3dDescriptor descriptor; |
| 193 | descriptor.m_PoolType = poolingAlgorithm; |
| 194 | |
| 195 | // custom_initial_data and custom_initial_data_size are void* variables defined in the tflite registration |
| 196 | // used to access the custom option buffer for the operator. |
| 197 | auto custom_data = tfLiteNode->custom_initial_data; |
| 198 | auto custom_data_size = tfLiteNode->custom_initial_data_size; |
| 199 | // Reinterpret the void* to a byte buffer to access the options data in the flexbuffers map. |
| 200 | const flexbuffers::Map& m = flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(custom_data), |
| 201 | custom_data_size).AsMap(); |
| 202 | // poolDims is a vector of [ 1, Depth, Height, Width, 1 ] |
| 203 | const auto poolDims = m["ksize"].AsTypedVector(); |
| 204 | descriptor.m_PoolWidth = poolDims[3].AsInt32(); |
| 205 | descriptor.m_PoolHeight = poolDims[2].AsInt32(); |
| 206 | descriptor.m_PoolDepth = poolDims[1].AsInt32(); |
| 207 | |
| 208 | // strideDimes is a vector of [ 1, Z, Y, X, 1] |
| 209 | const auto strideDims = m["strides"].AsTypedVector(); |
| 210 | descriptor.m_StrideX = strideDims[3].AsInt32(); |
| 211 | descriptor.m_StrideY = strideDims[2].AsInt32(); |
| 212 | descriptor.m_StrideZ = strideDims[1].AsInt32(); |
| 213 | descriptor.m_DataLayout = armnn::DataLayout::NDHWC; |
| 214 | |
| 215 | unsigned int inputDepth = inputTensorInfo.GetShape()[1]; |
| 216 | unsigned int inputHeight = inputTensorInfo.GetShape()[2]; |
| 217 | unsigned int inputWidth = inputTensorInfo.GetShape()[3]; |
| 218 | |
| 219 | // CalcPadding expects a TfLitePadding type. Parse flexbuffers to extract padding string and create TfLitePadding. |
| 220 | std::string paddingStr = m["padding"].AsString().str(); |
| 221 | TfLitePadding padding; |
| 222 | if (paddingStr == "VALID") |
| 223 | { |
| 224 | padding = kTfLitePaddingValid; |
| 225 | } |
| 226 | else if (paddingStr == "SAME") |
| 227 | { |
| 228 | padding = kTfLitePaddingSame; |
| 229 | } |
| 230 | else |
| 231 | { |
| 232 | padding = kTfLitePaddingUnknown; |
| 233 | } |
| 234 | // Calculates padding for each pooling dimension separately |
| 235 | CalcPadding(inputHeight, descriptor.m_PoolHeight, descriptor.m_StrideY, 1u, |
| 236 | descriptor.m_PadTop, descriptor.m_PadBottom, padding); |
| 237 | CalcPadding(inputWidth, descriptor.m_PoolWidth, descriptor.m_StrideX, 1u, |
| 238 | descriptor.m_PadLeft, descriptor.m_PadRight, padding); |
| 239 | CalcPadding(inputDepth, descriptor.m_PoolDepth, descriptor.m_StrideZ, 1u, |
| 240 | descriptor.m_PadFront, descriptor.m_PadBack, padding); |
| 241 | |
Ryan OShea | d21abaf | 2022-06-10 14:49:11 +0100 | [diff] [blame] | 242 | |
| 243 | // Check activation by parsing the string from the flexbuffer map |
| 244 | std::string activationTypeStr = m["activation"].AsString().str(); |
Ryan OShea | 475c7a8 | 2023-01-30 14:24:15 +0000 | [diff] [blame] | 245 | TfLiteFusedActivation activationType = kTfLiteActNone; |
Ryan OShea | d21abaf | 2022-06-10 14:49:11 +0100 | [diff] [blame] | 246 | |
| 247 | if (activationTypeStr == "kTfLiteActRelu") |
| 248 | { |
| 249 | activationType = kTfLiteActRelu; |
| 250 | } |
| 251 | else if (activationTypeStr == "kTfLiteActReluN1To1") |
| 252 | { |
| 253 | activationType = kTfLiteActReluN1To1; |
| 254 | } |
| 255 | else if (activationTypeStr == "kTfLiteActRelu6") |
| 256 | { |
| 257 | activationType = kTfLiteActRelu6; |
| 258 | } |
| 259 | else if (activationTypeStr == "kTfLiteActTanh") |
| 260 | { |
| 261 | activationType = kTfLiteActTanh; |
| 262 | } |
| 263 | else if (activationTypeStr == "kTfLiteActSignBit") |
| 264 | { |
| 265 | activationType = kTfLiteActSignBit; |
| 266 | } |
| 267 | else if (activationTypeStr == "kTfLiteActSigmoid") |
| 268 | { |
| 269 | activationType = kTfLiteActSigmoid; |
| 270 | } |
| 271 | else |
| 272 | { |
| 273 | activationType = kTfLiteActNone; |
| 274 | } |
| 275 | |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 276 | TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo, |
| 277 | outputTensorInfo, activationType); |
| 278 | if(activationStatus != kTfLiteOk) |
| 279 | { |
| 280 | return kTfLiteError; |
| 281 | } |
| 282 | |
| 283 | |
| 284 | // Validate the output info. |
| 285 | bool isSupported = false; |
| 286 | armnn::BackendId setBackend; |
| 287 | auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported) { |
| 288 | FORWARD_LAYER_SUPPORT_FUNC("POOLING_3D", |
| 289 | tfLiteContext, |
| 290 | IsPooling3dSupported, |
| 291 | delegateData.m_Backends, |
| 292 | isSupported, |
| 293 | setBackend, |
| 294 | inputTensorInfo, |
| 295 | outputTensorInfo, |
| 296 | descriptor); |
| 297 | }; |
| 298 | |
| 299 | if (!delegateData.m_Network) |
| 300 | { |
| 301 | validateFunc(outputTensorInfo, isSupported); |
| 302 | return isSupported ? kTfLiteOk : kTfLiteError; |
| 303 | } |
| 304 | |
| 305 | // Create the Layer |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 306 | auto layerName = GetLayerName(armnn::LayerType::Pooling3d, nodeIndex); |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 307 | armnn::IConnectableLayer* poolingLayer = delegateData.m_Network->AddPooling3dLayer(descriptor); |
| 308 | poolingLayer->SetBackendId(setBackend); |
| 309 | ARMNN_ASSERT(poolingLayer != nullptr); |
| 310 | |
| 311 | // Create and set output slots |
| 312 | armnn::IOutputSlot& outputSlot = poolingLayer->GetOutputSlot(0); |
| 313 | outputSlot.SetTensorInfo(outputTensorInfo); |
Ryan OShea | 4c231de | 2023-01-17 15:19:20 +0000 | [diff] [blame] | 314 | |
| 315 | // try to connect the Constant Inputs if there are any |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 316 | if (ProcessInputs(poolingLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk) |
Ryan OShea | 4c231de | 2023-01-17 15:19:20 +0000 | [diff] [blame] | 317 | { |
| 318 | return kTfLiteError; |
| 319 | } |
| 320 | |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 321 | if (Connect(poolingLayer, tfLiteNode, delegateData) != kTfLiteOk) |
Ryan OShea | 4c231de | 2023-01-17 15:19:20 +0000 | [diff] [blame] | 322 | { |
| 323 | return kTfLiteError; |
| 324 | } |
Ryan OShea | 3ad2e14 | 2023-01-13 10:19:20 +0000 | [diff] [blame] | 325 | |
Mike Kelly | 07169c8 | 2023-08-02 13:23:09 +0100 | [diff] [blame] | 326 | return FusedActivation(tfLiteContext, tfLiteNode, activationType, poolingLayer, 0, delegateData, nodeIndex); |
Ryan OShea | d21abaf | 2022-06-10 14:49:11 +0100 | [diff] [blame] | 327 | } |
| 328 | |
Sadik Armagan | 62483be | 2020-10-23 17:14:43 +0100 | [diff] [blame] | 329 | } // namespace armnnDelegate |