blob: 52c6b2434b64cdf9a6742358b27acdcba4a9d492 [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Ryan OShea3ad2e142023-01-13 10:19:20 +00002// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Sadik Armagan67e95f22020-10-29 16:14:54 +00008#include "DelegateUtils.hpp"
Jim Flynn4b2f3472021-10-13 21:20:07 +01009#include "MultiLayerFacade.hpp"
10#include "SharedFunctions.hpp"
Sadik Armagan67e95f22020-10-29 16:14:54 +000011
Sadik Armagan62483be2020-10-23 17:14:43 +010012#include <tensorflow/lite/builtin_ops.h>
13#include <tensorflow/lite/c/builtin_op_data.h>
14#include <tensorflow/lite/c/common.h>
15#include <tensorflow/lite/minimal_logging.h>
Sadik Armagan05e9fd22020-11-17 12:01:47 +000016#include "tensorflow/lite/delegates/utils.h"
Sadik Armagan62483be2020-10-23 17:14:43 +010017
18namespace armnnDelegate
19{
20
Sadik Armagan67e95f22020-10-29 16:14:54 +000021TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
22 TfLiteContext* tfLiteContext,
23 const armnn::TensorInfo& inputInfo1,
24 const armnn::TensorInfo& inputInfo2,
25 const armnn::TensorInfo& outputInfo)
26{
27 bool isSupported = false;
28 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
29 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +000030 FORWARD_LAYER_SUPPORT_FUNC("ADD",
Sadik Armagan67e95f22020-10-29 16:14:54 +000031 tfLiteContext,
32 IsAdditionSupported,
33 delegateData.m_Backends,
34 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +010035 armnn::BackendId(),
Sadik Armagan67e95f22020-10-29 16:14:54 +000036 inputInfo1,
37 inputInfo2,
38 outputTensorInfo);
39 };
40
41 validateFunc(outputInfo, isSupported);
42 return isSupported ? kTfLiteOk : kTfLiteError;
43}
44
Jim Flynn4b2f3472021-10-13 21:20:07 +010045
Sadik Armagan21a94ff2020-11-09 08:38:30 +000046TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
47 TfLiteContext* tfLiteContext,
48 const armnn::TensorInfo& inputInfo1,
49 const armnn::TensorInfo& inputInfo2,
50 const armnn::TensorInfo& outputInfo)
Sadik Armagan67e95f22020-10-29 16:14:54 +000051{
Sadik Armagan21a94ff2020-11-09 08:38:30 +000052 bool isSupported = false;
53 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
Sadik Armagan67e95f22020-10-29 16:14:54 +000054 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +000055 FORWARD_LAYER_SUPPORT_FUNC("DIV",
Sadik Armagan21a94ff2020-11-09 08:38:30 +000056 tfLiteContext,
57 IsDivisionSupported,
58 delegateData.m_Backends,
59 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +010060 armnn::BackendId(),
Sadik Armagan21a94ff2020-11-09 08:38:30 +000061 inputInfo1,
62 inputInfo2,
63 outputTensorInfo);
64 };
Sadik Armagan67e95f22020-10-29 16:14:54 +000065
Sadik Armagan21a94ff2020-11-09 08:38:30 +000066 validateFunc(outputInfo, isSupported);
67 return isSupported ? kTfLiteOk : kTfLiteError;
68}
69
Jim Flynn4b2f3472021-10-13 21:20:07 +010070TfLiteStatus ValidateFloorDivOperator(DelegateData& delegateData,
71 TfLiteContext* tfLiteContext,
72 const armnn::TensorInfo& inputInfo1,
73 const armnn::TensorInfo& inputInfo2,
74 const armnn::TensorInfo& outputInfo)
75{
76 // need first to validate that the div operator is supported
77 // then that the floor operator is supported
78 TfLiteStatus status = ValidateDivOperator(delegateData, tfLiteContext, inputInfo1, inputInfo2, outputInfo);
79 if (status != kTfLiteOk)
80 {
81 return status;
82 }
83 // if the inputs and output of the div are all Signed32 we don't need to add the floor operator afterward.
84 if (AreAllSigned32(inputInfo1, inputInfo2, outputInfo))
85 {
86 return status;
87 }
88 // in case broadcasting is being done from one of the inputs to the div
89 // choose the full sized input tensor to pass to the floor validation routine
90 armnn::TensorInfo floorInputInfo = inputInfo1;
91 if (inputInfo1.GetNumDimensions() < inputInfo2.GetNumDimensions())
92 {
93 floorInputInfo = inputInfo2;
94 }
95 status = ValidateFloorOperator(delegateData, tfLiteContext, floorInputInfo, outputInfo);
96 return status;
97}
98
Sadik Armagan21a94ff2020-11-09 08:38:30 +000099TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
100 TfLiteContext* tfLiteContext,
101 const armnn::TensorInfo& inputInfo1,
102 const armnn::TensorInfo& inputInfo2,
103 const armnn::TensorInfo& outputInfo)
104{
105 bool isSupported = false;
106 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
107 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000108 FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000109 tfLiteContext,
110 IsMaximumSupported,
111 delegateData.m_Backends,
112 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100113 armnn::BackendId(),
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000114 inputInfo1,
115 inputInfo2,
116 outputTensorInfo);
117 };
118
119 validateFunc(outputInfo, isSupported);
120 return isSupported ? kTfLiteOk : kTfLiteError;
121}
122
123TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
124 TfLiteContext* tfLiteContext,
125 const armnn::TensorInfo& inputInfo1,
126 const armnn::TensorInfo& inputInfo2,
127 const armnn::TensorInfo& outputInfo)
128{
129 bool isSupported = false;
130 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
131 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000132 FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000133 tfLiteContext,
134 IsMinimumSupported,
135 delegateData.m_Backends,
136 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100137 armnn::BackendId(),
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000138 inputInfo1,
139 inputInfo2,
140 outputTensorInfo);
141 };
142
143 validateFunc(outputInfo, isSupported);
144 return isSupported ? kTfLiteOk : kTfLiteError;
145}
146
147TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
148 TfLiteContext* tfLiteContext,
149 const armnn::TensorInfo& inputInfo1,
150 const armnn::TensorInfo& inputInfo2,
151 const armnn::TensorInfo& outputInfo)
152{
153 bool isSupported = false;
154 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
155 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000156 FORWARD_LAYER_SUPPORT_FUNC("MUL",
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000157 tfLiteContext,
158 IsMultiplicationSupported,
159 delegateData.m_Backends,
160 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100161 armnn::BackendId(),
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000162 inputInfo1,
163 inputInfo2,
164 outputTensorInfo);
165 };
166
167 validateFunc(outputInfo, isSupported);
168 return isSupported ? kTfLiteOk : kTfLiteError;
169}
170
171TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
172 TfLiteContext* tfLiteContext,
173 const armnn::TensorInfo& inputInfo1,
174 const armnn::TensorInfo& inputInfo2,
175 const armnn::TensorInfo& outputInfo)
176{
177 bool isSupported = false;
178 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
179 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000180 FORWARD_LAYER_SUPPORT_FUNC("SUB",
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000181 tfLiteContext,
182 IsSubtractionSupported,
183 delegateData.m_Backends,
184 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100185 armnn::BackendId(),
Sadik Armagan21a94ff2020-11-09 08:38:30 +0000186 inputInfo1,
187 inputInfo2,
188 outputTensorInfo);
189 };
190
191 validateFunc(outputInfo, isSupported);
192 return isSupported ? kTfLiteOk : kTfLiteError;
Sadik Armagan67e95f22020-10-29 16:14:54 +0000193}
194
Jim Flynn4b2f3472021-10-13 21:20:07 +0100195std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
196 DelegateData& delegateData,
197 const armnn::TensorInfo& outputTensorInfo)
198{
199 armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddDivisionLayer();
200 // if the output of the div is Signed32 the Floor layer is not required
201 if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
202 {
203 return std::make_pair(divisionLayer, divisionLayer);
204 }
205 armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
206 outputSlot.SetTensorInfo(outputTensorInfo);
207 armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
208 outputSlot.Connect(floorLayer->GetInputSlot(0));
209 return std::make_pair(divisionLayer, floorLayer);
210}
211
/// Visitor for all elementwise binary TfLite operators (ADD, DIV, FLOOR_DIV,
/// MAXIMUM, MINIMUM, MUL, SUB). Runs in two phases driven by
/// delegateData.m_Network:
///   - validation phase (m_Network == nullptr): returns whether the op is
///     supported by the backends, without building anything;
///   - creation phase: adds the corresponding Arm NN layer(s) to the network
///     and connects them.
/// Returns kTfLiteOk on success/support, kTfLiteError otherwise.
TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                            TfLiteContext* tfLiteContext,
                                            TfLiteNode* tfLiteNode,
                                            int nodeIndex,
                                            int32_t elementwiseBinaryOperatorCode)
{
    // All elementwise binary ops take exactly 2 inputs and produce 1 output.
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Dynamic (resizable) tensors cannot be handled by the delegate; reject
    // the node so TfLite falls back to its own kernel.
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (IsDynamicTensor(tfLiteInputTensor1))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Input infos are taken by value because they may be modified
    // (rank-expanded) below.
    armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Check if we need to expand the dims of the input tensor infos.
    // This is required for a few of the backends.
    if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
    }

    // NOTE(review): builtin_data is cast to TfLiteAddParams for every operator
    // code handled here — presumably the 'activation' member occupies the same
    // position in the param structs of all these ops (and MAXIMUM/MINIMUM carry
    // no params, leaving builtin_data null). TODO confirm against the TfLite
    // builtin_op_data.h definitions.
    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        // Validate the fused activation up front so both the validation and
        // creation phases reject unsupported activations.
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if(activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    // Validation phase: no network to build into, just report support.
    if (!delegateData.m_Network)
    {
        switch(elementwiseBinaryOperatorCode)
        {
            case kTfLiteBuiltinAdd:
                return ValidateAddOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinDiv:
                return ValidateDivOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinFloorDiv:
                return ValidateFloorDivOperator(delegateData,
                                                tfLiteContext,
                                                inputTensorInfo0,
                                                inputTensorInfo1,
                                                outputTensorInfo);
            case kTfLiteBuiltinMaximum:
                return ValidateMaximumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMinimum:
                return ValidateMinimumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMul:
                return ValidateMulOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinSub:
                return ValidateSubOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            default:
                return kTfLiteError;
        }
    }

    // Creation phase: add the layer(s) for this operator to the network.
    armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
    // multiLayer must outlive elementwiseBinaryLayer, which may point at it
    // for the FLOOR_DIV multi-layer case — hence it is declared at function
    // scope, before the switch.
    MultiLayerFacade multiLayer;
    switch(elementwiseBinaryOperatorCode)
    {
        case kTfLiteBuiltinAdd:
            elementwiseBinaryLayer = delegateData.m_Network->AddAdditionLayer();
            break;
        case kTfLiteBuiltinDiv:
            elementwiseBinaryLayer = delegateData.m_Network->AddDivisionLayer();
            break;
        case kTfLiteBuiltinFloorDiv:
            {
                // FLOOR_DIV maps onto two layers (div + floor); the facade
                // presents them as a single IConnectableLayer.
                auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
                multiLayer.AssignValues(layers.first, layers.second);
                elementwiseBinaryLayer = &multiLayer;
            }
            break;
        case kTfLiteBuiltinMaximum:
            elementwiseBinaryLayer = delegateData.m_Network->AddMaximumLayer();
            break;
        case kTfLiteBuiltinMinimum:
            elementwiseBinaryLayer = delegateData.m_Network->AddMinimumLayer();
            break;
        case kTfLiteBuiltinMul:
            elementwiseBinaryLayer = delegateData.m_Network->AddMultiplicationLayer();
            break;
        case kTfLiteBuiltinSub:
            elementwiseBinaryLayer = delegateData.m_Network->AddSubtractionLayer();
            break;
        default:
            return kTfLiteError;
    }
    ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
    armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Hook up constant/graph inputs feeding this layer.
    auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    // Connect the layer's slots to the node's TfLite tensors.
    if(Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and Create Activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
}
386
387} // namespace armnnDelegate