//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

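// Parses a TfLite UNIDIRECTIONAL_SEQUENCE_LSTM node: reads its constant weight/bias tensors and builtin
// options, checks whether the configured backends support the equivalent Arm NN layer, and, when a network
// is being constructed, adds and connects a UnidirectionalSequenceLstm layer for it.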
TfLiteStatus VisitUnidirectionalSequenceLstmOperator(DelegateData& delegateData,
                                                     TfLiteOpaqueContext* tfLiteContext,
                                                     TfLiteOpaqueNode* tfLiteNode,
                                                     int nodeIndex,
                                                     int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
| 24 | "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d != %d) in node #%d", |
| 25 | 2, numInputs, nodeIndex); |
        return kTfLiteError;
    }

    // Gather input indices and use to get input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use to get output tensors.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Set the params structure for the AddUnidirectionalSequenceLstmLayer call.
    // Please refer to each operand at
    // https://www.tensorflow.org/mlir/tfl_ops#tflunidirectional_sequence_lstm_tflunidirectionalsequencelstmop
    armnn::LstmInputParams params;

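    // Operands 1-17 and 20-23 are the constant weight and bias tensors. Optional operands that are absent in
    // the model are left as null pointers in params; the descriptor flags set further down are derived from
    // which of them were supplied.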
    if (IsOptionalOperandPresent(tfLiteNode, 1))
    {
        params.m_InputToInputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 1);
    }

    params.m_InputToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 2);
    params.m_InputToCellWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 3);
    params.m_InputToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 4);

    // Recurrent weight tensors of size {n_cell, n_output}
    if (IsOptionalOperandPresent(tfLiteNode, 5))
    {
        params.m_RecurrentToInputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 5);
    }

    params.m_RecurrentToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 6);
    params.m_RecurrentToCellWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 7);
    params.m_RecurrentToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 8);

    // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
    if (IsOptionalOperandPresent(tfLiteNode, 9))
    {
        params.m_CellToInputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 9);
    }

    if (IsOptionalOperandPresent(tfLiteNode, 10))
    {
        params.m_CellToForgetWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 10);
    }

    if (IsOptionalOperandPresent(tfLiteNode, 11))
    {
        params.m_CellToOutputWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 11);
    }

    // Gate bias tensors of size {n_cell}
    if (IsOptionalOperandPresent(tfLiteNode, 12))
    {
        params.m_InputGateBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 12);
    }

    params.m_ForgetGateBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 13);
    params.m_CellBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 14);
    params.m_OutputGateBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 15);

    // Projection weight tensor of size {n_output, n_cell}
    if (IsOptionalOperandPresent(tfLiteNode, 16))
    {
        params.m_ProjectionWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 16);
    }
    // Projection bias tensor of size {n_output}
    if (IsOptionalOperandPresent(tfLiteNode, 17))
    {
        params.m_ProjectionBias = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 17);
    }

    // These state tensors are defined as variable tensors, and will be modified by this op.
    const TfLiteOpaqueTensor* tfLiteOutputStateIn = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[18]);
    if (!IsValid(tfLiteContext, tfLiteOutputStateIn, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* cellStateIn = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[19]);
    if (!IsValid(tfLiteContext, cellStateIn, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    armnn::TensorInfo outputStateInInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputStateIn);
    armnn::TensorInfo cellStateInInfo = GetTensorInfoForTfLiteOpaqueTensor(cellStateIn);

    // Layer norm coefficient tensors of size {n_cell}, representing a diagonal matrix.
    if (IsOptionalOperandPresent(tfLiteNode, 20))
    {
        params.m_InputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 20);
    }

    if (IsOptionalOperandPresent(tfLiteNode, 21))
    {
        params.m_ForgetLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 21);
    }

    if (IsOptionalOperandPresent(tfLiteNode, 22))
    {
        params.m_CellLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 22);
    }

    if (IsOptionalOperandPresent(tfLiteNode, 23))
    {
        params.m_OutputLayerNormWeights = GetConstTensorForTfLiteTensor(tfLiteContext, tfLiteNode, 23);
    }

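    // Read the builtin options of the TfLite node (activation function, clipping thresholds, time-major flag).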
    const auto nodeParams =
        reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    // Set the layer descriptor.
    armnn::UnidirectionalSequenceLstmDescriptor desc;
    desc.m_ActivationFunc = NonNegative(nodeParams->activation, nodeIndex);
    desc.m_ClippingThresCell = nodeParams->cell_clip;
    desc.m_ClippingThresProj = nodeParams->proj_clip;
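    // The optional LSTM features are inferred from which tensors the model supplied: CIFG (coupled
    // input-forget gate) applies when the input gate tensors are missing, peephole when any cell-to-gate
    // weights are present, projection when projection weights are present, and layer normalization when any
    // of the layer norm weight tensors are present.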
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr
                          || params.m_RecurrentToInputWeights == nullptr
                          || params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr || params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr
                               || params.m_ForgetLayerNormWeights != nullptr
                               || params.m_CellLayerNormWeights != nullptr
                               || params.m_OutputLayerNormWeights != nullptr);
    desc.m_TimeMajor = nodeParams->time_major;

    // Intermediate tensors aren't accessible through the new opaque interface yet, so we have to cast for now.
    // This should be changed to use the accessor functions once added.
    auto* classicTfliteNode = reinterpret_cast<const TfLiteNode*>(tfLiteNode);

    if (classicTfliteNode->intermediates->size > 3 && desc.m_LayerNormEnabled)
    {
        auto inputIntermediateTensorInfo =
            GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, classicTfliteNode->intermediates->data[0]));
        auto forgetIntermediateTensorInfo =
            GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, classicTfliteNode->intermediates->data[1]));
        auto cellIntermediateTensorInfo =
            GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, classicTfliteNode->intermediates->data[2]));
        auto outputIntermediateTensorInfo =
            GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, classicTfliteNode->intermediates->data[3]));

        desc.m_InputIntermediateScale = inputIntermediateTensorInfo.GetQuantizationScale();
        desc.m_ForgetIntermediateScale = forgetIntermediateTensorInfo.GetQuantizationScale();
        desc.m_CellIntermediateScale = cellIntermediateTensorInfo.GetQuantizationScale();
        desc.m_OutputIntermediateScale = outputIntermediateTensorInfo.GetQuantizationScale();
    }
    else
    {
        float defaultIntermediate = std::pow(2, -12);
        desc.m_InputIntermediateScale = defaultIntermediate;
        desc.m_ForgetIntermediateScale = defaultIntermediate;
        desc.m_CellIntermediateScale = defaultIntermediate;
        desc.m_OutputIntermediateScale = defaultIntermediate;
    }
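    // A fifth intermediate tensor, when present, carries the quantization parameters of the hidden state.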
    if (classicTfliteNode->intermediates->size > 4)
    {
        auto hiddenTensorInfo =
            GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, classicTfliteNode->intermediates->data[4]));
        desc.m_HiddenStateScale = hiddenTensorInfo.GetQuantizationScale();
        desc.m_HiddenStateZeroPoint = hiddenTensorInfo.GetQuantizationOffset();
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    unsigned int batchSize = inputTensorInfo.GetShape()[0];
    unsigned int outputSize = outputTensorInfo.GetShape()[2];
    unsigned int numUnits = cellStateInInfo.GetShape()[1];

    armnn::DataType dataType = inputTensorInfo.GetDataType();
    float qScale = inputTensorInfo.GetQuantizationScale();
    int32_t qOffset = inputTensorInfo.GetQuantizationOffset();

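    // The scratch buffer holds the intermediate gate calculations: 4 * numUnits per batch entry, or
    // 3 * numUnits when CIFG is enabled and the input gate is omitted.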
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, dataType, qScale, qOffset);
    if (!desc.m_CifgEnabled)
    {
        scratchBufferTensorInfo = armnn::TensorInfo({batchSize, numUnits * 4}, dataType, qScale, qOffset);
    }
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits},
                                             cellStateInInfo.GetDataType(),
                                             cellStateInInfo.GetQuantizationScale(),
                                             cellStateInInfo.GetQuantizationOffset());

    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, dataType, qScale, qOffset);

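    // paramsInfo mirrors params but holds pointers to the TensorInfos of the constant tensors; it is what the
    // backend support query below validates against.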
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (desc.m_LayerNormEnabled)
    {
        if (!desc.m_CifgEnabled)
        {
            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
        }
        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
    }

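    // Check whether the configured backends support this LSTM configuration. The check is wrapped in a lambda
    // so it can be invoked against the output TensorInfo below when the delegate is only validating the node.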
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("UNIDIRECTIONAL_SEQUENCE_LSTM",
                                          tfLiteContext,
                                          IsUnidirectionalSequenceLstmSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputStateInInfo,
                                          cellStateInInfo,
                                          outputStateOutTensorInfo,
                                          cellStateOutTensorInfo,
                                          outputInfo,
                                          desc,
                                          paramsInfo);
    };

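    // When no network is being constructed the delegate is only asking whether this node can be handled,
    // so run the validation and report the result.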
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddUnidirectionalSequenceLstmLayer(desc, params);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

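    // The Arm NN layer exposes three outputs: slot 0 is the output state, slot 1 the cell state and slot 2
    // the output sequence; only slot 2 is surfaced back to the TfLite graph.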
    layer->GetOutputSlot(0).SetTensorInfo(outputStateOutTensorInfo);
    layer->GetOutputSlot(1).SetTensorInfo(cellStateOutTensorInfo);
    layer->GetOutputSlot(2).SetTensorInfo(outputTensorInfo);

    // Connect the inputs
    // input_layer
    delegateData.m_OutputSlotForNode[inputTensors[0]]->Connect(layer->GetInputSlot(0));
    // outputStateIn
    delegateData.m_OutputSlotForNode[inputTensors[18]]->Connect(layer->GetInputSlot(1));
    // cellStateIn
    delegateData.m_OutputSlotForNode[inputTensors[19]]->Connect(layer->GetInputSlot(2));

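    // Register the output slot carrying the output sequence so that downstream nodes can connect to it.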
    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(2);
    delegateData.m_OutputSlotForNode[static_cast<unsigned long>(outputTensors[0])] = &outputSlot;

    return kTfLiteOk;
}

} // namespace armnnOpaqueDelegate