//
// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SharedFunctions.hpp"

#include <ClassicDelegateUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{

TfLiteStatus ValidateFloorOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   const armnn::TensorInfo& inputTensorInfo,
                                   const armnn::TensorInfo& outputTensorInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("FLOOR",
                                   tfLiteContext,
                                   IsFloorSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputTensorInfo,
                                   outInfo);
    };
    validateFunc(outputTensorInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}
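
// A minimal usage sketch, kept in a comment because it is not part of this file: a Floor visit
// function could call ValidateFloorOperator during the delegate's Prepare phase to decide whether
// the node can be claimed. The names VisitFloorOperator and GetTensorInfoForTfLiteTensor are
// assumed here for illustration and may differ from the delegate's actual helpers.
//
//     // Inside a hypothetical VisitFloorOperator(delegateData, tfLiteContext, tfLiteNode, ...):
//     const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
//     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
//     if (!delegateData.m_Network)
//     {
//         // Only checking support; no ArmNN layers are created at this point.
//         return ValidateFloorOperator(delegateData, tfLiteContext, inputTensorInfo, outputTensorInfo);
//     }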

TfLiteStatus ValidateFusedActivationOperator(DelegateData& delegateData,
                                             TfLiteContext* tfLiteContext,
                                             const armnn::TensorInfo& inputInfo,
                                             const armnn::TensorInfo& outputInfo,
                                             TfLiteFusedActivation activationType)
{
    armnn::ActivationDescriptor activationDesc;

    switch (activationType)
    {
        case kTfLiteActNone:
        {
            // No Activation
            return kTfLiteOk;
        }
        case kTfLiteActRelu:
        {
            activationDesc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        }
// The name of kTfLiteActRelu1 changed after TF Lite v2.3
#if defined(ARMNN_POST_TFLITE_2_3)
        case kTfLiteActReluN1To1:
#else
        case kTfLiteActRelu1:
#endif
        {
            // BoundedReLu: m_A is the upper bound, m_B the lower bound.
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = -1.0f;
            break;
        }
        case kTfLiteActRelu6:
        {
            activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
            activationDesc.m_A = 6.0f;
            activationDesc.m_B = 0.0f;
            break;
        }
        case kTfLiteActSigmoid:
        {
            activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        }
        case kTfLiteActTanh:
        {
            // TanH: m_A and m_B are the scale factors in a * tanh(b * x).
            activationDesc.m_Function = armnn::ActivationFunction::TanH;
            activationDesc.m_A = 1.0f;
            activationDesc.m_B = 1.0f;
            break;
        }
        default:
            return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;

    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("ACTIVATION",
                                   tfLiteContext,
                                   IsActivationSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo,
                                   outputInfo,
                                   activationDesc);
    };
    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}
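
// A minimal usage sketch, kept in a comment because it is not part of this file: operators that carry
// a TfLiteFusedActivation (for example ADD or CONV_2D) typically validate the main layer first and then
// validate the fused activation on that layer's output. The names tfLiteNodeParameters and
// outputTensorInfo are assumptions made for the example.
//
//     TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
//     if (ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
//                                         outputTensorInfo, activationType) != kTfLiteOk)
//     {
//         return kTfLiteError;
//     }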

TfLiteNode* GetNodeConnectedToInput(TfLiteContext* tfLiteContext,
                                    int32_t& connectedIndex,
                                    int32_t inputIdx)
{
    TfLiteIntArray* executionPlan = nullptr;
    if (tfLiteContext->GetExecutionPlan(tfLiteContext, &executionPlan) != kTfLiteOk)
    {
        TF_LITE_KERNEL_LOG(tfLiteContext, "TfLiteArmnnDelegate: Unable to get graph execution plan.");
        return nullptr;
    }

    for (int i = 0; i < executionPlan->size; ++i)
    {
        connectedIndex = executionPlan->data[i];

        // Get the node and registration for this entry of the execution plan.
        TfLiteNode* connectedNode = nullptr;
        TfLiteRegistration* tfLiteRegistration = nullptr;
        if (tfLiteContext->GetNodeAndRegistration(
                tfLiteContext, connectedIndex, &connectedNode, &tfLiteRegistration) != kTfLiteOk)
        {
            TF_LITE_KERNEL_LOG(tfLiteContext,
                               "TfLiteArmnnDelegate: Unable to get node and registration for node %d.",
                               connectedIndex);
            continue;
        }
        // The producer of the tensor is the node that lists inputIdx among its outputs.
        for (int j = 0; j < connectedNode->outputs->size; ++j)
        {
            if (connectedNode->outputs->data[j] == inputIdx)
            {
                return connectedNode;
            }
        }
    }
    // No node found so set connectedIndex to -1
    connectedIndex = -1;
    return nullptr;
}
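
// A minimal usage sketch, kept in a comment because it is not part of this file: callers use
// GetNodeConnectedToInput to find the node that produces a given tensor and then inspect that node's
// registration, as WillInputBeOptimizedToConst does below. The variable names are illustrative only.
//
//     int32_t producerIndex = -1;
//     TfLiteNode* producerNode = GetNodeConnectedToInput(tfLiteContext, producerIndex, tensorIdx);
//     if (producerNode != nullptr)
//     {
//         // producerIndex can now be passed to GetNodeAndRegistration to examine the producing op;
//         // if no producer was found, producerNode is nullptr and producerIndex is set to -1.
//     }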

bool WillInputBeOptimizedToConst(TfLiteContext* tfLiteContext, int32_t inputIdx)
{
    int32_t connectedIndex;
    TfLiteNode* connectedNode = GetNodeConnectedToInput(tfLiteContext, connectedIndex, inputIdx);

    if (connectedNode)
    {
        TfLiteRegistration* tfLiteRegistration = nullptr;

        if (tfLiteContext->GetNodeAndRegistration(tfLiteContext, connectedIndex, &connectedNode, &tfLiteRegistration)
            == kTfLiteOk)
        {
            switch (tfLiteRegistration->builtin_code)
            {
                case kTfLiteBuiltinDequantize:
                {
                    if (connectedNode->inputs->size >= 1)
                    {
                        const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
                        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[connectedNode->inputs->data[0]];

                        // If the input to the Dequantize is a Constant then both that Constant layer and the Dequantize
                        // layer will be replaced by a single Constant layer containing the dequantized values.
                        if (tflite::IsConstantTensor(&tfLiteInputTensor))
                        {
                            return true;
                        }
                    }
                    break;
                }
                default:
                {
                }
            }
        }
    }
    return false;
}
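
// A minimal usage sketch, kept in a comment because it is not part of this file: a visit function that
// requires constant weights can accept either a directly constant tensor or a Dequantize of a constant,
// since the latter will be folded into a single Constant layer during ArmNN optimization. The weights
// input index used below is an assumption for the example.
//
//     bool weightsAreConstant = tflite::IsConstantTensor(&tfLiteWeightsTensor)
//                               || WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[1]);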

} // namespace armnnDelegate