//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/BackendHelper.hpp>
#include <armnn/utility/Assert.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace
{

// Macro to call an Is<layer_name>Supported function and log caller name together with reason for lack of support
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, tfLiteContext, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    TF_LITE_KERNEL_LOG( \
                        tfLiteContext, "%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            TF_LITE_KERNEL_LOG(tfLiteContext, "%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        TF_LITE_KERNEL_LOG(tfLiteContext, "%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
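
// Example use of FORWARD_LAYER_SUPPORT_FUNC (illustrative only; IsSoftmaxSupported, delegateData,
// inputTensorInfo, outputTensorInfo and descriptor are placeholders for whatever the calling operator
// visit function provides). The macro assigns the result to the pre-declared `isSupported` variable
// and logs the reason for failure through the TfLiteContext:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                tfLiteContext,
//                                IsSoftmaxSupported,
//                                delegateData.m_Backends,
//                                isSupported,
//                                inputTensorInfo,
//                                outputTensorInfo,
//                                descriptor);
//     if (!isSupported)
//     {
//         return kTfLiteError;
//     }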

TfLiteStatus ValidateNumInputs(TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               const unsigned int expectedSize,
                               int nodeIndex)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of inputs (%d != %d) in node #%d",
            numInputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus ValidateNumOutputs(TfLiteContext* tfLiteContext,
                                TfLiteNode* tfLiteNode,
                                const unsigned int expectedSize,
                                int nodeIndex)
{
    auto numOutputs = tfLiteNode->outputs->size;
    if (numOutputs != expectedSize)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Unexpected number of outputs (%d != %d) in node #%d",
            numOutputs, expectedSize, nodeIndex);
        return kTfLiteError;
    }
    return kTfLiteOk;
}
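
// Example use of ValidateNumInputs / ValidateNumOutputs (illustrative only; the expected input and
// output counts and nodeIndex depend on the operator being checked):
//
//     TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
//     TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));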

bool IsDynamicTensor(const TfLiteTensor& tfLiteTensor)
{
    return tfLiteTensor.allocation_type == kTfLiteDynamic;
}

armnn::TensorInfo GetTensorInfoForTfLiteTensor(const TfLiteTensor& tfLiteTensor)
{
    armnn::DataType type;
    switch (tfLiteTensor.type)
    {
        case kTfLiteBool:
            type = armnn::DataType::Boolean;
            break;
        case kTfLiteFloat32:
            type = armnn::DataType::Float32;
            break;
        case kTfLiteFloat16:
            type = armnn::DataType::Float16;
            break;
        case kTfLiteUInt8:
            type = armnn::DataType::QAsymmU8;
            break;
        case kTfLiteInt8:
            type = armnn::DataType::QSymmS8;
            break;
        case kTfLiteInt16:
            type = armnn::DataType::QSymmS16;
            break;
        case kTfLiteInt32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw armnn::Exception("TfLiteArmnnDelegate: Unsupported data type: " +
                                   std::to_string(tfLiteTensor.type));
    }

    armnn::TensorInfo ret;
    auto tensorDimensionSize = tfLiteTensor.dims->size;
    if (tensorDimensionSize == 0)
    {
        armnn::TensorShape tensorShape(armnn::Dimensionality::NotSpecified);
        ret = armnn::TensorInfo(tensorShape, type);
    }
    else
    {
        std::vector<unsigned int> tensorDims(tensorDimensionSize);
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        for (unsigned int i = 0; i < tensorDimensionSize; ++i)
        {
            auto dim = tfLiteTensor.dims->data[i];
            if (dim == 0)
            {
                dimensionsSpecificity[i] = false;
            }
            tensorDims[i] = dim;
        }
        armnn::TensorShape tensorShape(tensorDimensionSize, tensorDims.data(), dimensionsSpecificity);
        ret = armnn::TensorInfo(tensorShape, type);
    }

    auto quantizationInfo = tfLiteTensor.quantization;
    if (quantizationInfo.type == kTfLiteAffineQuantization)
    {
        // get per-channel quantization parameters
        const auto* affineQuantization =
            reinterpret_cast<TfLiteAffineQuantization*>(tfLiteTensor.quantization.params);
        std::vector<float> quantizationScales;
        for (unsigned int i = 0; i < affineQuantization->scale->size; ++i)
        {
            quantizationScales.push_back(affineQuantization->scale->data[i]);
        }
        ret.SetQuantizationScales(quantizationScales);
        ret.SetQuantizationDim(armnn::MakeOptional<unsigned int>(affineQuantization->quantized_dimension));
    }
    else
    {
        auto quantizationParameters = tfLiteTensor.params;
        ret.SetQuantizationScale(quantizationParameters.scale);
        ret.SetQuantizationOffset(quantizationParameters.zero_point);
    }

    return ret;
}
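
// Example use of GetTensorInfoForTfLiteTensor (illustrative only; index 0 is a placeholder for
// whichever input/output tensor the calling code needs):
//
//     const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
//     const armnn::TensorInfo& inputTensorInfo =
//         GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->inputs->data[0]]);
//     const armnn::TensorInfo& outputTensorInfo =
//         GetTensorInfoForTfLiteTensor(tfLiteTensors[tfLiteNode->outputs->data[0]]);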

TfLiteStatus Connect(armnn::IConnectableLayer& layer,
                     TfLiteNode* tfLiteNode,
                     armnnDelegate::DelegateData& data)
{
    ARMNN_ASSERT(tfLiteNode->inputs->size == layer.GetNumInputSlots());
    ARMNN_ASSERT(tfLiteNode->outputs->size == layer.GetNumOutputSlots());

    // connect the input slots
    for (unsigned int inputIndex = 0; inputIndex < layer.GetNumInputSlots(); ++inputIndex)
    {
        data.m_OutputSlotForNode[tfLiteNode->inputs->data[inputIndex]]->Connect(layer.GetInputSlot(inputIndex));
    }

    // prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer.GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(outputIndex);
        data.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &outputSlot;
    }
    return kTfLiteOk;
}
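
// Example use of Connect (illustrative only; AddSoftmaxLayer and descriptor stand in for whatever
// layer the calling operator visit function adds, and delegateData is assumed to be the
// armnnDelegate::DelegateData passed through from the delegate):
//
//     armnn::IConnectableLayer* layer = delegateData.m_Network->AddSoftmaxLayer(descriptor);
//     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
//     return Connect(*layer, tfLiteNode, delegateData);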

} // namespace anonymous