//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include "tensorflow/lite/delegates/utils.h"

namespace armnnDelegate
{

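// Each Validate<Op>Operator helper asks the configured backends, via FORWARD_LAYER_SUPPORT_FUNC,
// whether the corresponding ArmNN layer is supported for the given input/output tensor infos.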
TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsAdditionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsDivisionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     const armnn::TensorInfo& inputInfo1,
                                     const armnn::TensorInfo& inputInfo2,
                                     const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsMaximumSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     const armnn::TensorInfo& inputInfo1,
                                     const armnn::TensorInfo& inputInfo2,
                                     const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsMinimumSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsMultiplicationSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsSubtractionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

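// Maps a TfLite elementwise binary builtin (ADD, DIV, MAXIMUM, MINIMUM, MUL, SUB) onto the
// corresponding ArmNN layer. If no network is being built, only backend support is checked.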
TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                            TfLiteContext* tfLiteContext,
                                            TfLiteNode* tfLiteNode,
                                            int nodeIndex,
                                            int32_t elementwiseBinaryOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (IsDynamicTensor(tfLiteInputTensor1))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

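    // With no network to build, this call is only a capability check: query backend support
    // for the requested builtin and return without adding any layers.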
    if (!delegateData.m_Network)
    {
        switch(elementwiseBinaryOperatorCode)
        {
            case kTfLiteBuiltinAdd:
                return ValidateAddOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinDiv:
                return ValidateDivOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinMaximum:
                return ValidateMaximumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMinimum:
                return ValidateMinimumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMul:
                return ValidateMulOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinSub:
                return ValidateSubOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            default:
                return kTfLiteError;
        }
    }

    armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;

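    // Add the ArmNN layer that implements the requested TfLite builtin.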
    switch(elementwiseBinaryOperatorCode)
    {
        case kTfLiteBuiltinAdd:
            elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
            break;
        case kTfLiteBuiltinDiv:
            elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
            break;
        case kTfLiteBuiltinMaximum:
            elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
            break;
        case kTfLiteBuiltinMinimum:
            elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
            break;
        case kTfLiteBuiltinMul:
            elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
            break;
        case kTfLiteBuiltinSub:
            elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
            break;
        default:
            return kTfLiteError;
    }
    ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
    armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

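    // Set up and connect any constant input tensors via ProcessInputs.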
    auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

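    // Handle broadcasting between the two inputs (adding a reshape where required) and
    // connect them to the elementwise binary layer.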
    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
                                        inputTensorInfo1,
                                        elementwiseBinaryLayer,
                                        tfLiteContext,
                                        tfLiteNode,
                                        delegateData);
    if (!reshapeLayer)
    {
        return kTfLiteError;
    }

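    // The Add/Div/Mul/Sub param structs all carry the fused activation as their first field,
    // so it is read through TfLiteAddParams here; builtins without parameters have no builtin_data.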
    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check activation
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
}

} // namespace armnnDelegate