| // |
| // Copyright © 2020 Arm Ltd and Contributors. All rights reserved. |
| // SPDX-License-Identifier: MIT |
| // |
| |
| #pragma once |
| |
#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
| |
| namespace armnnDelegate |
| { |
| |
| TfLiteStatus VisitL2NormalizationOperator(DelegateData& delegateData, |
| TfLiteContext* tfLiteContext, |
| TfLiteNode* tfLiteNode, |
| int nodeIndex, |
| int32_t operatorCode) |
| { |
| TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| |
| const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; |
| const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; |
| if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex)) |
| { |
| return kTfLiteError; |
| } |
| |
| const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; |
| if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex)) |
| { |
| return kTfLiteError; |
| } |
| |
| const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); |
| const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); |
| |
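    // TfLite tensors are laid out NHWC, so configure the descriptor to match.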
| armnn::L2NormalizationDescriptor descriptor; |
| descriptor.m_DataLayout = armnn::DataLayout::NHWC; |
| |
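    // Wrap the backend support query in a lambda so it can be run during the
    // delegate's validation pass, before any layer is added to the network.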
| bool isSupported = false; |
| auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) |
| { |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| tfLiteContext, |
| IsL2NormalizationSupported, |
| delegateData.m_Backends, |
| isSupported, |
| inputTensorInfo, |
| outInfo, |
| descriptor); |
| }; |
| |
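    // If there is no network yet we are in the validation phase: report whether
    // the layer is supported without adding anything.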
| if (!delegateData.m_Network) |
| { |
| validateFunc(outputTensorInfo, isSupported); |
| return isSupported ? kTfLiteOk : kTfLiteError; |
| } |
| |
    // Add an L2Normalization layer to the network
| armnn::IConnectableLayer* layer = delegateData.m_Network->AddL2NormalizationLayer(descriptor); |
| ARMNN_ASSERT(layer != nullptr); |
| |
| armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0); |
| outputSlot.SetTensorInfo(outputTensorInfo); |
| |
    // Connect the layer's input and output slots into the graph
| return Connect(layer, tfLiteNode, delegateData); |
| } |
| |
| |
| TfLiteStatus VisitLocalResponseNormalizationOperator(DelegateData& delegateData, |
| TfLiteContext* tfLiteContext, |
| TfLiteNode* tfLiteNode, |
| int nodeIndex, |
| int32_t normalizationOperatorCode) |
| { |
| TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex)); |
| |
| const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors; |
| const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]]; |
| if (!IsValid(tfLiteContext, tfLiteInputTensor, normalizationOperatorCode, nodeIndex)) |
| { |
| return kTfLiteError; |
| } |
| |
| const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]]; |
| if (!IsValid(tfLiteContext, tfLiteOutputTensor, normalizationOperatorCode, nodeIndex)) |
| { |
| return kTfLiteError; |
| } |
| |
| const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor); |
| const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor); |
| |
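    // TfLite's Local Response Normalization maps onto ArmNN's generic Normalization
    // layer, normalizing across channels with the LocalBrightness (LRN) method.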
| armnn::NormalizationDescriptor descriptor; |
| descriptor.m_DataLayout = armnn::DataLayout::NHWC; |
| descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across; |
| descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness; |
| |
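    // The radius, bias, alpha and beta parameters come from the node's builtin data.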
| auto* params = reinterpret_cast<TfLiteLocalResponseNormParams*>(tfLiteNode->builtin_data); |
| descriptor.m_NormSize = params->radius; |
| descriptor.m_K = params->bias; |
| descriptor.m_Alpha = params->alpha; |
| descriptor.m_Beta = params->beta; |
| |
    // ArmNN expects m_NormSize to be the full size of the normalization window,
    // whereas TfLite's radius counts elements either side of the centre:
    // e.g. a radius of 2 gives a window of 1 + (2 * 2) = 5 elements.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
| |
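    // As above, defer the backend support query so it can run in the validation pass.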
| bool isSupported = false; |
| auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) |
| { |
| FORWARD_LAYER_SUPPORT_FUNC(__func__, |
| tfLiteContext, |
| IsNormalizationSupported, |
| delegateData.m_Backends, |
| isSupported, |
| inputTensorInfo, |
| outInfo, |
| descriptor); |
| }; |
| |
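    // Validation phase: no network to modify, so only report support.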
| if (!delegateData.m_Network) |
| { |
| validateFunc(outputTensorInfo, isSupported); |
| return isSupported ? kTfLiteOk : kTfLiteError; |
| } |
| |
| // Add a Normalization layer |
| armnn::IConnectableLayer* layer = delegateData.m_Network->AddNormalizationLayer(descriptor); |
| ARMNN_ASSERT(layer != nullptr); |
| |
| armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0); |
| outputSlot.SetTensorInfo(outputTensorInfo); |
| |
    // Connect the layer's input and output slots into the graph
| return Connect(layer, tfLiteNode, delegateData); |
| } |
| |
| } // namespace armnnDelegate |
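
// A minimal sketch (assumed, not part of this file) of how these visitors are
// typically dispatched from the delegate's node-visiting switch:
//
//     switch (tfLiteRegistration->builtin_code)
//     {
//         case kTfLiteBuiltinL2Normalization:
//             return VisitL2NormalizationOperator(delegateData, tfLiteContext, tfLiteNode,
//                                                 nodeIndex, kTfLiteBuiltinL2Normalization);
//         case kTfLiteBuiltinLocalResponseNormalization:
//             return VisitLocalResponseNormalizationOperator(delegateData, tfLiteContext, tfLiteNode,
//                                                            nodeIndex,
//                                                            kTfLiteBuiltinLocalResponseNormalization);
//         ...
//     }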