//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <OpaqueDelegateUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnOpaqueDelegate
{

TfLiteStatus VisitBatchToSpaceNdOperator(DelegateData& delegateData,
                                         TfLiteOpaqueContext* tfLiteContext,
                                         TfLiteOpaqueNode* tfLiteNode,
                                         int nodeIndex,
                                         int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    int numInputs = 3;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

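    // Gather and validate the opaque tensors: the data input, the block-shape and crops inputs, and the single output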
    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLiteBlockShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
                                                                                          inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLiteCropsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
    if (!IsValid(tfLiteContext, tfLiteCropsTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
                                                                                      outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBlockShapeTensor);
    const armnn::TensorInfo& cropsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteCropsTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    // Copy memory into block and crops
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    std::memcpy(blockShape.data(), TfLiteOpaqueTensorData(tfLiteBlockShapeTensor), blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> cropsVector(cropsTensorInfo.GetNumElements());
    std::memcpy(cropsVector.data(), TfLiteOpaqueTensorData(tfLiteCropsTensor), cropsTensorInfo.GetNumBytes());

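    // The crops tensor holds a [begin, end] pair per spatial dimension; rebuild those pairs for the descriptor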
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> crops;
    for (unsigned int i = 0; i < cropsTensorInfo.GetNumElements() / step; ++i)
    {
        crops.emplace_back(cropsVector[i * step], cropsVector[i * step + 1]);
    }

    // Make a descriptor
    armnn::BatchToSpaceNdDescriptor descriptor;
    descriptor.m_BlockShape = blockShape;
    descriptor.m_Crops = crops;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("BATCH_TO_SPACE_ND",
                                          tfLiteContext,
                                          IsBatchToSpaceNdSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor);
    };

    // If delegateData.m_Network is null, this is the prerequisite validation call from TfLite: only check whether
    // the operator is supported. If it is, VisitBatchToSpaceNdOperator will be called again to add the layer to the
    // network as seen below.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Add a BatchToSpace layer
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddBatchToSpaceNdLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitSpaceToBatchNdOperator(DelegateData& delegateData,
                                         TfLiteOpaqueContext* tfLiteContext,
                                         TfLiteOpaqueNode* tfLiteNode,
                                         int nodeIndex,
                                         int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    int numInputs = 3;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

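    // Gather and validate the opaque tensors: the data input, the block-shape and padding inputs, and the single output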
    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLiteBlockShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
                                                                                          inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteBlockShapeTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLitePadListTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
                                                                                       inputTensors[2]);
    if (!IsValid(tfLiteContext, tfLitePadListTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
                                                                                      outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& blockShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBlockShapeTensor);
    const armnn::TensorInfo& padListTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLitePadListTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

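    // Copy memory into block shape and pad list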
    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
    std::memcpy(blockShape.data(),
                TfLiteOpaqueTensorData(tfLiteBlockShapeTensor),
                blockShapeTensorInfo.GetNumBytes());

    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
    std::memcpy(padListVector.data(),
                TfLiteOpaqueTensorData(tfLitePadListTensor),
                padListTensorInfo.GetNumBytes());

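    // The padding tensor holds a [before, after] pair per spatial dimension; rebuild those pairs for the descriptor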
    size_t step = 2;
    std::vector<std::pair<unsigned int, unsigned int>> padList;
    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
    {
        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
    }

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_BlockShape = blockShape;
    descriptor.m_PadList = padList;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPACE_TO_BATCH_ND",
                                          tfLiteContext,
                                          IsSpaceToBatchNdSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor);
    };

    // If delegateData.m_Network is null, this is the prerequisite validation call from TfLite: only check whether
    // the operator is supported. If it is, VisitSpaceToBatchNdOperator will be called again to add the layer to the
    // network as seen below.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Add a SpaceToBatch layer
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSpaceToBatchNdLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate