blob: f56609001aee2b2ad84f7452b3fcedd8c10c4c74 [file] [log] [blame]
//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

Teresa Charlinf69ae562023-04-27 14:42:23 +010013std::string GetLayerName(armnn::ActivationFunction activationFunction)
14{
15 std::string layerName = "ACTIVATION";
16 switch (activationFunction)
17 {
18 case armnn::ActivationFunction::Abs:
19 layerName += " ABS";
20 break;
21 case armnn::ActivationFunction::BoundedReLu:
22 layerName += " BOUNDED_RELU";
23 break;
24 case armnn::ActivationFunction::Elu:
25 layerName += " ELU";
26 break;
27 case armnn::ActivationFunction::HardSwish:
28 layerName += " HARD_SWISH";
29 break;
30 case armnn::ActivationFunction::LeakyReLu:
31 layerName += " LEAKY_RELU";
32 break;
33 case armnn::ActivationFunction::Linear:
34 layerName += " LINEAR";
35 break;
36 case armnn::ActivationFunction::ReLu:
37 layerName += " RELU";
38 break;
39 case armnn::ActivationFunction::Sigmoid:
40 layerName += " SIGMOID";
41 break;
42 case armnn::ActivationFunction::SoftReLu:
43 layerName += " SOFT_RELU";
44 break;
45 case armnn::ActivationFunction::Square:
46 layerName += " SQUARE";
47 break;
48 case armnn::ActivationFunction::Sqrt:
49 layerName += " SQRT";
50 break;
51 case armnn::ActivationFunction::TanH:
52 layerName += " TANH";
53 break;
54 default:
55 layerName += " UNKNOWN";
56 }
57 return layerName;
58}
59
Matthew Sloyan0bd4c622023-04-27 11:48:26 +010060TfLiteStatus ValidateActivationOperator(DelegateData& delegateData,
61 TfLiteOpaqueContext* tfLiteContext,
62 const armnn::TensorInfo& inputInfo,
63 const armnn::TensorInfo& outputInfo,
64 armnn::ActivationDescriptor& activationDesc)
65{
66 bool isSupported = false;
Teresa Charlinf69ae562023-04-27 14:42:23 +010067 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported, std::string layerName)
Matthew Sloyan0bd4c622023-04-27 11:48:26 +010068 {
Teresa Charlinf69ae562023-04-27 14:42:23 +010069 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC(layerName.c_str(),
Matthew Sloyan0bd4c622023-04-27 11:48:26 +010070 tfLiteContext,
71 IsActivationSupported,
72 delegateData.m_Backends,
73 isSupported,
74 armnn::BackendId(),
75 inputInfo,
76 outputInfo,
77 activationDesc);
78 };
79
Teresa Charlinf69ae562023-04-27 14:42:23 +010080 validateFunc(outputInfo, isSupported, GetLayerName(activationDesc.m_Function));
Matthew Sloyan0bd4c622023-04-27 11:48:26 +010081 return isSupported ? kTfLiteOk : kTfLiteError;
82}
83
84TfLiteStatus VisitActivationOperator(DelegateData& delegateData,
85 TfLiteOpaqueContext* tfLiteContext,
86 TfLiteOpaqueNode* tfLiteNode,
87 int nodeIndex,
88 int32_t operatorCode)
89{
90 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
91 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
92
93 // Gather input indices and use to get input tensor.
94 int numInputs = 0;
95 const int* inputTensors;
96 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
97 {
98 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
99 tfLiteContext,
100 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
101 nodeIndex);
102 return kTfLiteError;
103 }
104
105 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
106 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
107 {
108 return kTfLiteError;
109 }
110
111 // Gather output indices and use to get output tensors.
112 int numOutputs = 0;
113 const int* outputTensors;
114 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
115 {
116 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
117 tfLiteContext,
118 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
119 nodeIndex);
120 return kTfLiteError;
121 }
122
123 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
124 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
125 {
126 return kTfLiteError;
127 }
128
129 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
130 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
131
132 armnn::ActivationDescriptor activationDesc;
133 switch(operatorCode)
134 {
135 case kTfLiteBuiltinRelu:
136 {
137 activationDesc.m_Function = armnn::ActivationFunction::ReLu;
138 break;
139 }
140 case kTfLiteBuiltinRelu6:
141 {
142 activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
143 activationDesc.m_A = 6.0f;
144 break;
145 }
146 case kTfLiteBuiltinLogistic:
147 {
148 activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
149 break;
150 }
151 case kTfLiteBuiltinTanh:
152 {
153 activationDesc.m_Function = armnn::ActivationFunction::TanH;
154 activationDesc.m_A = 1.0f;
155 activationDesc.m_B = 1.0f;
156 break;
157 }
158 case kTfLiteBuiltinElu:
159 {
160 activationDesc.m_Function = armnn::ActivationFunction::Elu;
161 activationDesc.m_A = 1.0f;
162 break;
163 }
164 case kTfLiteBuiltinHardSwish:
165 {
166 activationDesc.m_Function = armnn::ActivationFunction::HardSwish;
167 break;
168 }
Tianle Chengae931732023-07-28 11:53:04 +0100169 case kTfLiteBuiltinLeakyRelu:
170 {
171 // Get alpha param from builtin data
172 auto* leakyReluParameters =
173 reinterpret_cast<TfLiteLeakyReluParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
174 activationDesc.m_Function = armnn::ActivationFunction::LeakyReLu;
175 activationDesc.m_A = leakyReluParameters->alpha;
176 break;
177 }
Matthew Sloyan0bd4c622023-04-27 11:48:26 +0100178 default:
179 {
180 return kTfLiteError;
181 }
182 }
183 if (!delegateData.m_Network)
184 {
185 return ValidateActivationOperator(delegateData,
186 tfLiteContext,
187 inputTensorInfo,
188 outputTensorInfo,
189 activationDesc);
190 }
191 armnn::IConnectableLayer* activationLayer = delegateData.m_Network->AddActivationLayer(activationDesc);
192 ARMNN_ASSERT(activationLayer != nullptr);
193
194 armnn::IOutputSlot& outputSlot = activationLayer->GetOutputSlot(0);
195 outputSlot.SetTensorInfo(outputTensorInfo);
196
197 // try to connect the Constant Inputs if there are any
198 if(ProcessInputs(activationLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
199 {
200 return kTfLiteError;
201 }
202
203 // Connect
204 return Connect(activationLayer, tfLiteContext, tfLiteNode, delegateData);
205}
206
} // namespace armnnOpaqueDelegate