//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"
#include "MultiLayerFacade.hpp"
#include "SharedFunctions.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include "tensorflow/lite/delegates/utils.h"

namespace armnnDelegate
{

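// Each Validate*Operator helper below asks the configured backends, through the
// FORWARD_LAYER_SUPPORT_FUNC macro, whether the corresponding Arm NN layer supports
// the given input/output tensor infos, and converts the answer into a TfLiteStatus
// so the delegate can accept or reject the node.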
TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("ADD",
                                   tfLiteContext,
                                   IsAdditionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("DIV",
                                   tfLiteContext,
                                   IsDivisionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateFloorDivOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      const armnn::TensorInfo& inputInfo1,
                                      const armnn::TensorInfo& inputInfo2,
                                      const armnn::TensorInfo& outputInfo)
{
    // First validate that the DIV operator is supported,
    // then that the FLOOR operator is supported.
    TfLiteStatus status = ValidateDivOperator(delegateData, tfLiteContext, inputInfo1, inputInfo2, outputInfo);
    if (status != kTfLiteOk)
    {
        return status;
    }
    // If the inputs and output of the DIV are all Signed32, there is no need to add the FLOOR operator afterwards.
    if (AreAllSigned32(inputInfo1, inputInfo2, outputInfo))
    {
        return status;
    }
    // In case one of the inputs to the DIV is being broadcast,
    // choose the full-sized input tensor to pass to the FLOOR validation routine.
    armnn::TensorInfo floorInputInfo = inputInfo1;
    if (inputInfo1.GetNumDimensions() < inputInfo2.GetNumDimensions())
    {
        floorInputInfo = inputInfo2;
    }
    status = ValidateFloorOperator(delegateData, tfLiteContext, floorInputInfo, outputInfo);
    return status;
}

TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     const armnn::TensorInfo& inputInfo1,
                                     const armnn::TensorInfo& inputInfo2,
                                     const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
                                   tfLiteContext,
                                   IsMaximumSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     const armnn::TensorInfo& inputInfo1,
                                     const armnn::TensorInfo& inputInfo2,
                                     const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
                                   tfLiteContext,
                                   IsMinimumSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MUL",
                                   tfLiteContext,
                                   IsMultiplicationSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("SUB",
                                   tfLiteContext,
                                   IsSubtractionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

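// Builds the layer(s) needed for FLOOR_DIV: a Division layer, followed by a Floor
// layer unless the output is Signed32 (integer division already yields a floored
// result). Returns the pair {first layer, last layer} so the caller can connect
// inputs to the former and outputs to the latter.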
std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
    DelegateData& delegateData,
    const armnn::TensorInfo& outputTensorInfo)
{
    armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddDivisionLayer();
    // If the output of the DIV is Signed32, the Floor layer is not required.
    if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
    {
        return std::make_pair(divisionLayer, divisionLayer);
    }
    armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
    outputSlot.Connect(floorLayer->GetInputSlot(0));
    return std::make_pair(divisionLayer, floorLayer);
}

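// Entry point for the elementwise binary builtins (ADD, DIV, FLOOR_DIV, MAXIMUM,
// MINIMUM, MUL, SUB). When delegateData.m_Network is null the call only validates
// backend support; otherwise it adds the matching Arm NN layer, broadcasts the
// inputs if their ranks differ, and appends any fused activation.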
TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                            TfLiteContext* tfLiteContext,
                                            TfLiteNode* tfLiteNode,
                                            int nodeIndex,
                                            int32_t elementwiseBinaryOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (IsDynamicTensor(tfLiteInputTensor1))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    if (!delegateData.m_Network)
    {
        switch(elementwiseBinaryOperatorCode)
        {
            case kTfLiteBuiltinAdd:
                return ValidateAddOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinDiv:
                return ValidateDivOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinFloorDiv:
                return ValidateFloorDivOperator(delegateData,
                                                tfLiteContext,
                                                inputTensorInfo0,
                                                inputTensorInfo1,
                                                outputTensorInfo);
            case kTfLiteBuiltinMaximum:
                return ValidateMaximumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMinimum:
                return ValidateMinimumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMul:
                return ValidateMulOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinSub:
                return ValidateSubOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            default:
                return kTfLiteError;
        }
    }

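    // FLOOR_DIV maps to two Arm NN layers (Division + Floor); MultiLayerFacade wraps
    // them so the rest of this function can treat the pair as a single layer.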
    armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
    MultiLayerFacade multiLayer;
    switch(elementwiseBinaryOperatorCode)
    {
        case kTfLiteBuiltinAdd:
            elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
            break;
        case kTfLiteBuiltinDiv:
            elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
            break;
        case kTfLiteBuiltinFloorDiv:
        {
            auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
            multiLayer.AssignValues(layers.first, layers.second);
            elementwiseBinaryLayer = &multiLayer;
        }
            break;
        case kTfLiteBuiltinMaximum:
            elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
            break;
        case kTfLiteBuiltinMinimum:
            elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
            break;
        case kTfLiteBuiltinMul:
            elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
            break;
        case kTfLiteBuiltinSub:
            elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
            break;
        default:
            return kTfLiteError;
    }
    ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
    armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
                                        inputTensorInfo1,
                                        elementwiseBinaryLayer,
                                        tfLiteContext,
                                        tfLiteNode,
                                        delegateData);
    if (!reshapeLayer)
    {
        return kTfLiteError;
    }

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check activation
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
}

} // namespace armnnDelegate