//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"
#include "MultiLayerFacade.hpp"
#include "SharedFunctions.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include "tensorflow/lite/delegates/utils.h"

namespace armnnDelegate
{

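// Each Validate<Operator> helper below checks backend support for the equivalent armnn
// ElementwiseBinary operation via FORWARD_LAYER_SUPPORT_FUNC and returns kTfLiteOk only
// if the operation is reported as supported.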
TfLiteStatus ValidateAddOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("ADD",
                                   tfLiteContext,
                                   IsElementwiseBinarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo,
                                   armnn::BinaryOperation::Add);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateDivOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("DIV",
                                   tfLiteContext,
                                   IsElementwiseBinarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo,
                                   armnn::BinaryOperation::Div);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateFloorDivOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      const armnn::TensorInfo& inputInfo1,
                                      const armnn::TensorInfo& inputInfo2,
                                      const armnn::TensorInfo& outputInfo)
{
    // First validate that the Div operator is supported, then that the Floor operator is supported.
    TfLiteStatus status = ValidateDivOperator(delegateData, tfLiteContext, inputInfo1, inputInfo2, outputInfo);
    if (status != kTfLiteOk)
    {
        return status;
    }
    // If the inputs and output of the Div are all Signed32, the Floor operator is not needed afterwards.
    if (AreAllSigned32(inputInfo1, inputInfo2, outputInfo))
    {
        return status;
    }
    // In case one of the Div inputs is broadcast, pass the full-sized input tensor
    // to the Floor validation routine.
    armnn::TensorInfo floorInputInfo = inputInfo1;
    if (inputInfo1.GetNumDimensions() < inputInfo2.GetNumDimensions())
    {
        floorInputInfo = inputInfo2;
    }
    status = ValidateFloorOperator(delegateData, tfLiteContext, floorInputInfo, outputInfo);
    return status;
}

TfLiteStatus ValidateMaximumOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     const armnn::TensorInfo& inputInfo1,
                                     const armnn::TensorInfo& inputInfo2,
                                     const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MAXIMUM",
                                   tfLiteContext,
                                   IsElementwiseBinarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo,
                                   armnn::BinaryOperation::Maximum);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMinimumOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     const armnn::TensorInfo& inputInfo1,
                                     const armnn::TensorInfo& inputInfo2,
                                     const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MINIMUM",
                                   tfLiteContext,
                                   IsElementwiseBinarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo,
                                   armnn::BinaryOperation::Minimum);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateMulOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MUL",
                                   tfLiteContext,
                                   IsElementwiseBinarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo,
                                   armnn::BinaryOperation::Mul);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

TfLiteStatus ValidateSubOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 const armnn::TensorInfo& inputInfo1,
                                 const armnn::TensorInfo& inputInfo2,
                                 const armnn::TensorInfo& outputInfo)
{
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("SUB",
                                   tfLiteContext,
                                   IsElementwiseBinarySupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   armnn::BackendId(),
                                   inputInfo1,
                                   inputInfo2,
                                   outputTensorInfo,
                                   armnn::BinaryOperation::Sub);
    };

    validateFunc(outputInfo, isSupported);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

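// TfLite FLOOR_DIV is realised here as a Div layer optionally followed by a Floor layer
// (the Floor is skipped when the output is Signed32). The returned pair holds the first
// and last layers of the sequence so they can be wrapped in a MultiLayerFacade.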
std::pair<armnn::IConnectableLayer*, armnn::IConnectableLayer*> AddFloorDivLayer(
    DelegateData& delegateData,
    const armnn::TensorInfo& outputTensorInfo)
{
    armnn::IConnectableLayer* divisionLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
        armnn::BinaryOperation::Div);
    // If the output of the Div is Signed32, the Floor layer is not required.
    if (armnn::DataType::Signed32 == outputTensorInfo.GetDataType())
    {
        return std::make_pair(divisionLayer, divisionLayer);
    }
    armnn::IOutputSlot& outputSlot = divisionLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    armnn::IConnectableLayer* floorLayer = delegateData.m_Network->AddFloorLayer();
    outputSlot.Connect(floorLayer->GetInputSlot(0));
    return std::make_pair(divisionLayer, floorLayer);
}

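// Translates a TfLite elementwise binary node (ADD, DIV, FLOOR_DIV, MAXIMUM, MINIMUM, MUL, SUB).
// When delegateData.m_Network is null, only backend support is validated; otherwise the matching
// ElementwiseBinary layer (plus a Floor layer for FLOOR_DIV) is added, connected, and any fused
// activation is appended.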
TfLiteStatus VisitElementwiseBinaryOperator(DelegateData& delegateData,
                                            TfLiteContext* tfLiteContext,
                                            TfLiteNode* tfLiteNode,
                                            int nodeIndex,
                                            int32_t elementwiseBinaryOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteInputTensor1 = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (IsDynamicTensor(tfLiteInputTensor1))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            elementwiseBinaryOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Check if we need to expand the dims of the input tensor infos.
    // This is required for a few of the backends.
    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
    }

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext,
                                                                        outputTensorInfo, outputTensorInfo,
                                                                        activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

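    // Validation-only phase: no network is being built, so just report backend support
    // for the requested operator.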
    if (!delegateData.m_Network)
    {
        switch(elementwiseBinaryOperatorCode)
        {
            case kTfLiteBuiltinAdd:
                return ValidateAddOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinDiv:
                return ValidateDivOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinFloorDiv:
                return ValidateFloorDivOperator(delegateData,
                                                tfLiteContext,
                                                inputTensorInfo0,
                                                inputTensorInfo1,
                                                outputTensorInfo);
            case kTfLiteBuiltinMaximum:
                return ValidateMaximumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMinimum:
                return ValidateMinimumOperator(delegateData,
                                               tfLiteContext,
                                               inputTensorInfo0,
                                               inputTensorInfo1,
                                               outputTensorInfo);
            case kTfLiteBuiltinMul:
                return ValidateMulOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            case kTfLiteBuiltinSub:
                return ValidateSubOperator(delegateData,
                                           tfLiteContext,
                                           inputTensorInfo0,
                                           inputTensorInfo1,
                                           outputTensorInfo);
            default:
                return kTfLiteError;
        }
    }

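    // Network construction phase: add the armnn layer(s) implementing the TfLite operator.
    // FLOOR_DIV maps to the Div (+ Floor) combination wrapped in a MultiLayerFacade.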
    armnn::IConnectableLayer* elementwiseBinaryLayer = nullptr;
    MultiLayerFacade multiLayer;
    switch(elementwiseBinaryOperatorCode)
    {
        case kTfLiteBuiltinAdd:
            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
                armnn::BinaryOperation::Add);
            break;
        case kTfLiteBuiltinDiv:
            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
                armnn::BinaryOperation::Div);
            break;
        case kTfLiteBuiltinFloorDiv:
        {
            auto layers = AddFloorDivLayer(delegateData, outputTensorInfo);
            multiLayer.AssignValues(layers.first, layers.second);
            elementwiseBinaryLayer = &multiLayer;
            break;
        }
        case kTfLiteBuiltinMaximum:
            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
                armnn::BinaryOperation::Maximum);
            break;
        case kTfLiteBuiltinMinimum:
            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
                armnn::BinaryOperation::Minimum);
            break;
        case kTfLiteBuiltinMul:
            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
                armnn::BinaryOperation::Mul);
            break;
        case kTfLiteBuiltinSub:
            elementwiseBinaryLayer = delegateData.m_Network->AddElementwiseBinaryLayer(
                armnn::BinaryOperation::Sub);
            break;
        default:
            return kTfLiteError;
    }
    ARMNN_ASSERT(elementwiseBinaryLayer != nullptr);
    armnn::IOutputSlot& outputSlot = elementwiseBinaryLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    auto inputsTensorsProcess = ProcessInputs(elementwiseBinaryLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    if (Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and Create Activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, elementwiseBinaryLayer, 0, delegateData);
}

} // namespace armnnDelegate