blob: 107d4de21c4b30d1d5aaee29f674090019acda3f [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Ryan OShea3ad2e142023-01-13 10:19:20 +00002// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Matthew Sloyan11572322023-03-16 10:17:51 +00008#include <ClassicDelegateUtils.hpp>
9#include <SharedFunctions.hpp>
Sadik Armagan32ca1442020-11-13 17:51:56 +000010
Sadik Armagan62483be2020-10-23 17:14:43 +010011#include <tensorflow/lite/builtin_ops.h>
12#include <tensorflow/lite/c/builtin_op_data.h>
13#include <tensorflow/lite/c/common.h>
14#include <tensorflow/lite/minimal_logging.h>
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000015#include <tensorflow/lite/kernels/internal/tensor.h>
Sadik Armagan62483be2020-10-23 17:14:43 +010016
17namespace armnnDelegate
18{
19
Sadik Armagan32ca1442020-11-13 17:51:56 +000020TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
21 TfLiteContext* tfLiteContext,
22 TfLiteNode* tfLiteNode,
23 int nodeIndex,
24 int32_t operatorCode)
25{
26 auto numInputs = tfLiteNode->inputs->size;
27 if (numInputs < 2)
28 {
29 TF_LITE_MAYBE_KERNEL_LOG(
30 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
31 2, numInputs, nodeIndex);
32 return kTfLiteError;
33 }
34 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
35
36 armnn::Convolution2dDescriptor descriptor;
37 const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
38
Mike Kelly84d63782022-05-06 12:14:16 +010039 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +000040 descriptor.m_BiasEnabled = biasEnabled;
41 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
42 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
43 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
44 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
45 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
46
47 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
48 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
49 if(!IsValid(&tfLiteTensors[tfLiteNode->inputs->data[0]]))
50 {
51 TF_LITE_MAYBE_KERNEL_LOG(
52 tfLiteContext,
53 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
54 operatorCode, nodeIndex);
55 return kTfLiteError;
56 }
57 if (IsDynamicTensor(tfLiteInputTensor))
58 {
59 TF_LITE_MAYBE_KERNEL_LOG(
60 tfLiteContext,
61 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
62 operatorCode, nodeIndex);
63 return kTfLiteError;
64 }
65 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
66 if(!IsValid(&tfLiteOutputTensor))
67 {
68 TF_LITE_MAYBE_KERNEL_LOG(
69 tfLiteContext,
70 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
71 operatorCode, nodeIndex);
72 return kTfLiteError;
73 }
74 if (IsDynamicTensor(tfLiteOutputTensor))
75 {
76 TF_LITE_MAYBE_KERNEL_LOG(
77 tfLiteContext,
78 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
79 operatorCode, nodeIndex);
80 return kTfLiteError;
81 }
82
83 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
84 if(!IsValid(&tfLiteFilterTensor))
85 {
86 TF_LITE_MAYBE_KERNEL_LOG(
87 tfLiteContext,
88 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
89 operatorCode, nodeIndex);
90 return kTfLiteError;
91 }
92 if (IsDynamicTensor(tfLiteFilterTensor))
93 {
94 TF_LITE_MAYBE_KERNEL_LOG(
95 tfLiteContext,
96 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
97 nodeIndex);
98 return kTfLiteError;
99 }
100
101 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100102 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000103
Ryan OShea3ad2e142023-01-13 10:19:20 +0000104 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +0000105 TfLiteFusedActivation activationType=kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000106 if (tfLiteNodeParameters)
107 {
108 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000109 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
110 outputTensorInfo, activationType);
111 if(activationStatus != kTfLiteOk)
112 {
113 return kTfLiteError;
114 }
115
116 }
117
Ryan OShea4c231de2023-01-17 15:19:20 +0000118 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000119
120 armnn::TensorInfo biasTensorInfo;
121 if(biasEnabled)
122 {
123 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
124 if(!IsValid(&tfLiteBiasTensor))
125 {
126 TF_LITE_MAYBE_KERNEL_LOG(
127 tfLiteContext,
128 "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
129 operatorCode, nodeIndex);
130 return kTfLiteError;
131 }
132 if (IsDynamicTensor(tfLiteBiasTensor))
133 {
134 TF_LITE_MAYBE_KERNEL_LOG(
135 tfLiteContext,
136 "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
137 nodeIndex);
138 return kTfLiteError;
139 }
140 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
141 }
142 else
143 {
144 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
145 }
146
147 armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
148
149 // TfLite uses NHWC tensors
150 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
151 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
152
153 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
154 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
155
156 // Calculate padding
157 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
158 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
159 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
160 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
161
Cathal Corbett53837672022-09-01 11:34:37 +0100162 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000163 if (!delegateData.m_Network)
164 {
165 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000166 FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000167 tfLiteContext,
168 IsConvolution2dSupported,
169 delegateData.m_Backends,
170 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100171 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000172 inputTensorInfo,
173 outputTensorInfo,
174 descriptor,
175 filterTensorInfo,
176 optionalBiasInfo);
177 return isSupported ? kTfLiteOk : kTfLiteError;
178 }
179
Sadik Armagan32ca1442020-11-13 17:51:56 +0000180 // Set up filter and biases
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100181 armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100182 layer->SetBackendId(setBackend);
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100183
Ryan OShea4c231de2023-01-17 15:19:20 +0000184 if(filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100185 {
186 auto filter =
187 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
Ryan OShea4c231de2023-01-17 15:19:20 +0000188 filterTensorInfo);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000189
Sadik Armagan90a119b2022-08-05 16:12:49 +0100190 armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
191 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
192 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
193 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100194
195 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000196 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100197 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000198 if(biasTensorInfo.IsConstant())
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100199 {
200 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
201 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
202 ARMNN_ASSERT(biasLayer != nullptr);
203 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
204 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
205 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000206 }
207
Ryan OShea4c231de2023-01-17 15:19:20 +0000208 // The data input can also be constant, so we must check that this is also allocated to an input slot
209 if(inputTensorInfo.IsConstant())
210 {
211 auto input =
212 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
213 inputTensorInfo);
214
215 armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
216 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
217 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
218 }
219
Sadik Armagan32ca1442020-11-13 17:51:56 +0000220 ARMNN_ASSERT(layer != nullptr);
221
222 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
223 outputSlot.SetTensorInfo(outputTensorInfo);
224
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000225 if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
226 {
227 return kTfLiteError;
228 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000229
Sadik Armagan32ca1442020-11-13 17:51:56 +0000230 if (!tfLiteNodeParameters)
231 {
232 // No Activation
233 return kTfLiteOk;
234 }
Ryan OShea3ad2e142023-01-13 10:19:20 +0000235 // Check and Create activation
Sadik Armagan32ca1442020-11-13 17:51:56 +0000236 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
237
238}
239
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
/// Validates a TfLite CONV_3D node and, when a network is being built, adds the
/// corresponding ArmNN Convolution3d layer.
///
/// Mirrors VisitConv2dOperator but with NDHWC layout, a depth dimension for
/// strides/dilations/padding, and the streamlined 4-argument IsValid checks.
/// When delegateData.m_Network is null this only queries backend support.
///
/// @param delegateData  Delegate state: candidate backends and (optionally) the network under construction.
/// @param tfLiteContext TfLite context, used for tensor access and error reporting.
/// @param tfLiteNode    The TfLite node being visited.
/// @param nodeIndex     Index of the node, used in diagnostics.
/// @param operatorCode  TfLite builtin operator code, used in diagnostics.
/// @return kTfLiteOk on success (or "supported" in the query phase), kTfLiteError otherwise.
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    // builtin_data holds the TfLiteConv3DParams for a CONV_3D node. Cast once
    // and reuse it everywhere (strides/dilations here, fused activation below).
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    // Input 2 (bias) is optional.
    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Validate any fused activation up front so an unsupported activation
    // rejects the node before layers are added.
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (params)
    {
        activationType = params->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if(activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        // Placeholder info so the IsConvolution3dSupported query has a bias shape to inspect.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(biasTensorInfo.IsConstant())
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor,
                                            biasTensorInfo);

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that this is also allocated to an input slot
    if(inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
                                       inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!params)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif
429
Sadik Armagan32ca1442020-11-13 17:51:56 +0000430TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
431 TfLiteContext* tfLiteContext,
432 TfLiteNode* tfLiteNode,
433 int nodeIndex,
434 int32_t operatorCode)
435{
436 auto numInputs = tfLiteNode->inputs->size;
437 if (numInputs < 2)
438 {
439 TF_LITE_MAYBE_KERNEL_LOG(
440 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
441 2, numInputs, nodeIndex);
442 return kTfLiteError;
443 }
444 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
445
Mike Kelly84d63782022-05-06 12:14:16 +0100446 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000447
448 armnn::DepthwiseConvolution2dDescriptor descriptor;
449 const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
450
451 descriptor.m_BiasEnabled = biasEnabled;
452 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
453 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
454 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
455 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
456 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
457
458 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
459 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
460 if(!IsValid(&tfLiteInputTensor))
461 {
462 TF_LITE_MAYBE_KERNEL_LOG(
463 tfLiteContext,
464 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
465 operatorCode, nodeIndex);
466 return kTfLiteError;
467 }
468 if (IsDynamicTensor(tfLiteInputTensor))
469 {
470 TF_LITE_MAYBE_KERNEL_LOG(
471 tfLiteContext,
472 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
473 operatorCode, nodeIndex);
474 return kTfLiteError;
475 }
476 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
477 if(!IsValid(&tfLiteOutputTensor))
478 {
479 TF_LITE_MAYBE_KERNEL_LOG(
480 tfLiteContext,
481 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
482 operatorCode, nodeIndex);
483 return kTfLiteError;
484 }
485 if (IsDynamicTensor(tfLiteOutputTensor))
486 {
487 TF_LITE_MAYBE_KERNEL_LOG(
488 tfLiteContext,
489 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
490 operatorCode, nodeIndex);
491 return kTfLiteError;
492 }
493
494 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
495 if(!IsValid(&tfLiteFilterTensor))
496 {
497 TF_LITE_MAYBE_KERNEL_LOG(
498 tfLiteContext,
499 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
500 operatorCode, nodeIndex);
501 return kTfLiteError;
502 }
503 if (IsDynamicTensor(tfLiteFilterTensor))
504 {
505 TF_LITE_MAYBE_KERNEL_LOG(
506 tfLiteContext,
507 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
508 nodeIndex);
509 return kTfLiteError;
510 }
511
512 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100513 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000514
Ryan OShea3ad2e142023-01-13 10:19:20 +0000515 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +0000516 TfLiteFusedActivation activationType = kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000517 if (tfLiteNodeParameters)
518 {
519 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000520 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
521 outputTensorInfo, activationType);
522 if(activationStatus != kTfLiteOk)
523 {
524 return kTfLiteError;
525 }
526
527 }
528
Ryan OShea4c231de2023-01-17 15:19:20 +0000529 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000530
531 // Assuming input is NHWC
532 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
533 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
534
535 // TensorflowLite weights come in the format [1, H, W, I * M]
536 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
537 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
538
Sadik Armagan32ca1442020-11-13 17:51:56 +0000539 // Calculate padding
540 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
541 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
542 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
543 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
544
545 armnn::TensorInfo biasTensorInfo;
546 if(biasEnabled)
547 {
548 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
549 if(!IsValid(&tfLiteBiasTensor))
550 {
551 TF_LITE_MAYBE_KERNEL_LOG(
552 tfLiteContext,
553 "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
554 operatorCode, nodeIndex);
555 return kTfLiteError;
556 }
557 if (IsDynamicTensor(tfLiteBiasTensor))
558 {
559 TF_LITE_MAYBE_KERNEL_LOG(
560 tfLiteContext,
561 "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
562 nodeIndex);
563 return kTfLiteError;
564 }
565 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
566 }
567 else
568 {
569 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
570 }
571
Cathal Corbett53837672022-09-01 11:34:37 +0100572 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000573 if (!delegateData.m_Network)
574 {
575 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000576 FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000577 tfLiteContext,
578 IsDepthwiseConvolutionSupported,
579 delegateData.m_Backends,
580 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100581 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000582 inputTensorInfo,
583 outputTensorInfo,
584 descriptor,
Sadik Armagan90a119b2022-08-05 16:12:49 +0100585 filterTensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000586 armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
587 return isSupported ? kTfLiteOk : kTfLiteError;
588 }
589
Cathal Corbett06902652022-04-14 17:55:11 +0100590 armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100591 layer->SetBackendId(setBackend);
Narumol Prangnawarat16725422020-11-20 16:17:48 +0000592
Ryan OShea4c231de2023-01-17 15:19:20 +0000593 if(filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100594 {
595 // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
596 auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
597
598 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
599 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
600 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
601 }
Cathal Corbett06902652022-04-14 17:55:11 +0100602
603 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000604 {
Cathal Corbett06902652022-04-14 17:55:11 +0100605 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000606 if(biasTensorInfo.IsConstant())
Cathal Corbett06902652022-04-14 17:55:11 +0100607 {
608 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
609 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
610 ARMNN_ASSERT(biasLayer != nullptr);
611 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
612 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
613 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000614 }
615
Ryan OShea4c231de2023-01-17 15:19:20 +0000616 // The data input can also be constant, so we must check that this is also allocated to an input slot
617 if(inputTensorInfo.IsConstant())
618 {
619 auto input =
620 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
621 inputTensorInfo);
622
623 armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
624 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
625 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
626 }
627
Sadik Armagan32ca1442020-11-13 17:51:56 +0000628 ARMNN_ASSERT(layer != nullptr);
629
630 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
631 outputSlot.SetTensorInfo(outputTensorInfo);
632
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000633 if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
634 {
635 return kTfLiteError;
636 }
637
Sadik Armagan32ca1442020-11-13 17:51:56 +0000638 if (!tfLiteNodeParameters)
639 {
640 // No Activation
641 return kTfLiteOk;
642 }
Ryan OShea3ad2e142023-01-13 10:19:20 +0000643 // Check and create activation
Sadik Armagan32ca1442020-11-13 17:51:56 +0000644 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
645}
646
647TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
648 TfLiteContext* tfLiteContext,
649 TfLiteNode* tfLiteNode,
650 int nodeIndex,
651 int32_t operatorCode)
652{
653 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
654 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
655
656 armnn::TransposeConvolution2dDescriptor descriptor;
657 auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
658 descriptor.m_BiasEnabled = false;
659 descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
660 descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
661 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
662
663 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
664 const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
665 if(!IsValid(&tfLiteOutputShapeTensor))
666 {
667 TF_LITE_MAYBE_KERNEL_LOG(
668 tfLiteContext,
669 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
670 operatorCode, nodeIndex);
671 return kTfLiteError;
672 }
673 if (IsDynamicTensor(tfLiteOutputShapeTensor))
674 {
675 TF_LITE_MAYBE_KERNEL_LOG(
676 tfLiteContext,
677 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
678 operatorCode, nodeIndex);
679 return kTfLiteError;
680 }
681
Ryan OShea4c231de2023-01-17 15:19:20 +0000682 const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
683 std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
684 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000685 {
Ryan OShea4c231de2023-01-17 15:19:20 +0000686 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000687 {
688 outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
689 }
690 }
691
Ryan OShea4c231de2023-01-17 15:19:20 +0000692 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000693 {
Ryan OShea4c231de2023-01-17 15:19:20 +0000694 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000695 {
696 outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
697 }
698 }
699 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
700 for (int dimension : outputShape)
701 {
702 descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
703 }
704 descriptor.m_OutputShapeEnabled = true;
705
706 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
707 if(!IsValid(&tfLiteInputTensor))
708 {
709 TF_LITE_MAYBE_KERNEL_LOG(
710 tfLiteContext,
711 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
712 operatorCode, nodeIndex);
713 return kTfLiteError;
714 }
715 if (IsDynamicTensor(tfLiteInputTensor))
716 {
717 TF_LITE_MAYBE_KERNEL_LOG(
718 tfLiteContext,
719 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
720 operatorCode, nodeIndex);
721 return kTfLiteError;
722 }
723
724 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
725 if(!IsValid(&tfLiteOutputTensor))
726 {
727 TF_LITE_MAYBE_KERNEL_LOG(
728 tfLiteContext,
729 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
730 operatorCode, nodeIndex);
731 return kTfLiteError;
732 }
733 if (IsDynamicTensor(tfLiteOutputTensor))
734 {
735 TF_LITE_MAYBE_KERNEL_LOG(
736 tfLiteContext,
737 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
738 operatorCode, nodeIndex);
739 return kTfLiteError;
740 }
741
742 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
743 if(!IsValid(&tfLiteFilterTensor))
744 {
745 TF_LITE_MAYBE_KERNEL_LOG(
746 tfLiteContext,
747 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
748 operatorCode, nodeIndex);
749 return kTfLiteError;
750 }
751 if (IsDynamicTensor(tfLiteFilterTensor))
752 {
753 TF_LITE_MAYBE_KERNEL_LOG(
754 tfLiteContext,
755 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
756 operatorCode, nodeIndex);
757 return kTfLiteError;
758 }
759
760 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100761 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Ryan OShea4c231de2023-01-17 15:19:20 +0000762 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000763
764 // TfLite uses NHWC tensors
765 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
766 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
767
768 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
769 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
770
771 // Calculate padding
772 CalcPadding(inputHeight,
773 filterHeight,
774 descriptor.m_StrideY,
775 1, // dilation y
776 descriptor.m_PadTop,
777 descriptor.m_PadBottom,
778 parameters->padding);
779 CalcPadding(inputWidth,
780 filterWidth,
781 descriptor.m_StrideX,
782 1, // dilation x
783 descriptor.m_PadLeft,
784 descriptor.m_PadRight,
785 parameters->padding);
786
787 // Set up filter
788 auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
Ryan OShea4c231de2023-01-17 15:19:20 +0000789 filterTensorInfo);
Cathal Corbett53837672022-09-01 11:34:37 +0100790 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000791 if (!delegateData.m_Network)
792 {
793 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000794 FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000795 tfLiteContext,
796 IsTransposeConvolution2dSupported,
797 delegateData.m_Backends,
798 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100799 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000800 inputTensorInfo,
801 outputTensorInfo,
802 descriptor,
803 filterTensorInfo,
804 armnn::EmptyOptional());
805 return isSupported ? kTfLiteOk : kTfLiteError;
806 }
807
808 armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
809 filterTensor,
810 armnn::EmptyOptional());
Cathal Corbett53837672022-09-01 11:34:37 +0100811 layer->SetBackendId(setBackend);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000812 ARMNN_ASSERT(layer != nullptr);
813
Ryan OShea4c231de2023-01-17 15:19:20 +0000814 // The data input can be constant, so we must check that this is allocated to an input slot
815 if(inputTensorInfo.IsConstant())
816 {
817 auto input =
818 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
819 inputTensorInfo);
820
821 armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
822 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
823 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
824 }
825
Sadik Armagan32ca1442020-11-13 17:51:56 +0000826 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
827 outputSlot.SetTensorInfo(outputTensorInfo);
828
829 // Connect
Keith Davis892fafe2020-11-26 17:40:35 +0000830 if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000831 {
Keith Davis892fafe2020-11-26 17:40:35 +0000832 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
833 Connect(layer->GetInputSlot(0));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000834 }
835
836 // Prepare output slots
837 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
838 {
839 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Keith Davis892fafe2020-11-26 17:40:35 +0000840 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
841 &outputSlot;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000842 }
843 return kTfLiteOk;
844}
845
Sadik Armagan62483be2020-10-23 17:14:43 +0100846TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
847 TfLiteContext* tfLiteContext,
848 TfLiteNode* tfLiteNode,
849 int nodeIndex,
850 int32_t operatorCode)
851{
Sadik Armagan32ca1442020-11-13 17:51:56 +0000852 switch(operatorCode)
853 {
854 case kTfLiteBuiltinConv2d:
855 return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
Matthew Sloyan81ec9942021-10-12 10:26:30 +0100856// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
857#if defined(ARMNN_POST_TFLITE_2_5)
858 case kTfLiteBuiltinConv3d:
859 return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
860#endif
Sadik Armagan32ca1442020-11-13 17:51:56 +0000861 case kTfLiteBuiltinDepthwiseConv2d:
862 return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
863 case kTfLiteBuiltinTransposeConv:
864 return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
865 default:
866 return kTfLiteError;
867 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100868}
869
870} // namespace armnnDelegate