blob: a8559e254893d58fa83f0a377d42020cac85ea80 [file] [log] [blame]
//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#pragma once
7
Sadik Armagan32ca1442020-11-13 17:51:56 +00008#include "DelegateUtils.hpp"
Ryan OShea3ad2e142023-01-13 10:19:20 +00009#include "SharedFunctions.hpp"
Sadik Armagan32ca1442020-11-13 17:51:56 +000010
Sadik Armagan62483be2020-10-23 17:14:43 +010011#include <tensorflow/lite/builtin_ops.h>
12#include <tensorflow/lite/c/builtin_op_data.h>
13#include <tensorflow/lite/c/common.h>
14#include <tensorflow/lite/minimal_logging.h>
Sadik Armagan32ca1442020-11-13 17:51:56 +000015#include "tensorflow/lite/kernels/internal/tensor.h"
Sadik Armagan62483be2020-10-23 17:14:43 +010016
17namespace armnnDelegate
18{
19
Sadik Armagan32ca1442020-11-13 17:51:56 +000020TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
21 TfLiteContext* tfLiteContext,
22 TfLiteNode* tfLiteNode,
23 int nodeIndex,
24 int32_t operatorCode)
25{
26 auto numInputs = tfLiteNode->inputs->size;
27 if (numInputs < 2)
28 {
29 TF_LITE_MAYBE_KERNEL_LOG(
30 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
31 2, numInputs, nodeIndex);
32 return kTfLiteError;
33 }
34 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
35
36 armnn::Convolution2dDescriptor descriptor;
37 const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
38
Mike Kelly84d63782022-05-06 12:14:16 +010039 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +000040 descriptor.m_BiasEnabled = biasEnabled;
41 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
42 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
43 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
44 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
45 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
46
47 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
48 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
49 if(!IsValid(&tfLiteTensors[tfLiteNode->inputs->data[0]]))
50 {
51 TF_LITE_MAYBE_KERNEL_LOG(
52 tfLiteContext,
53 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
54 operatorCode, nodeIndex);
55 return kTfLiteError;
56 }
57 if (IsDynamicTensor(tfLiteInputTensor))
58 {
59 TF_LITE_MAYBE_KERNEL_LOG(
60 tfLiteContext,
61 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
62 operatorCode, nodeIndex);
63 return kTfLiteError;
64 }
65 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
66 if(!IsValid(&tfLiteOutputTensor))
67 {
68 TF_LITE_MAYBE_KERNEL_LOG(
69 tfLiteContext,
70 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
71 operatorCode, nodeIndex);
72 return kTfLiteError;
73 }
74 if (IsDynamicTensor(tfLiteOutputTensor))
75 {
76 TF_LITE_MAYBE_KERNEL_LOG(
77 tfLiteContext,
78 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
79 operatorCode, nodeIndex);
80 return kTfLiteError;
81 }
82
83 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
84 if(!IsValid(&tfLiteFilterTensor))
85 {
86 TF_LITE_MAYBE_KERNEL_LOG(
87 tfLiteContext,
88 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
89 operatorCode, nodeIndex);
90 return kTfLiteError;
91 }
92 if (IsDynamicTensor(tfLiteFilterTensor))
93 {
94 TF_LITE_MAYBE_KERNEL_LOG(
95 tfLiteContext,
96 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
97 nodeIndex);
98 return kTfLiteError;
99 }
100
101 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100102 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000103
Ryan OShea3ad2e142023-01-13 10:19:20 +0000104 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +0000105 TfLiteFusedActivation activationType=kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000106 if (tfLiteNodeParameters)
107 {
108 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000109 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
110 outputTensorInfo, activationType);
111 if(activationStatus != kTfLiteOk)
112 {
113 return kTfLiteError;
114 }
115
116 }
117
Ryan OShea4c231de2023-01-17 15:19:20 +0000118 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000119
120 armnn::TensorInfo biasTensorInfo;
121 if(biasEnabled)
122 {
123 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
124 if(!IsValid(&tfLiteBiasTensor))
125 {
126 TF_LITE_MAYBE_KERNEL_LOG(
127 tfLiteContext,
128 "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
129 operatorCode, nodeIndex);
130 return kTfLiteError;
131 }
132 if (IsDynamicTensor(tfLiteBiasTensor))
133 {
134 TF_LITE_MAYBE_KERNEL_LOG(
135 tfLiteContext,
136 "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
137 nodeIndex);
138 return kTfLiteError;
139 }
140 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
141 }
142 else
143 {
144 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
145 }
146
147 armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
148
149 // TfLite uses NHWC tensors
150 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
151 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
152
153 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
154 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
155
156 // Calculate padding
157 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
158 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
159 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
160 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
161
Cathal Corbett53837672022-09-01 11:34:37 +0100162 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000163 if (!delegateData.m_Network)
164 {
165 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000166 FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000167 tfLiteContext,
168 IsConvolution2dSupported,
169 delegateData.m_Backends,
170 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100171 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000172 inputTensorInfo,
173 outputTensorInfo,
174 descriptor,
175 filterTensorInfo,
176 optionalBiasInfo);
177 return isSupported ? kTfLiteOk : kTfLiteError;
178 }
179
Sadik Armagan32ca1442020-11-13 17:51:56 +0000180 // Set up filter and biases
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100181 armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100182 layer->SetBackendId(setBackend);
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100183
Ryan OShea4c231de2023-01-17 15:19:20 +0000184 if(filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100185 {
186 auto filter =
187 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
Ryan OShea4c231de2023-01-17 15:19:20 +0000188 filterTensorInfo);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000189
Sadik Armagan90a119b2022-08-05 16:12:49 +0100190 armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
191 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
192 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
193 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100194
195 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000196 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100197 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000198 if(biasTensorInfo.IsConstant())
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100199 {
200 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
201 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
202 ARMNN_ASSERT(biasLayer != nullptr);
203 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
204 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
205 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000206 }
207
Ryan OShea4c231de2023-01-17 15:19:20 +0000208 // The data input can also be constant, so we must check that this is also allocated to an input slot
209 if(inputTensorInfo.IsConstant())
210 {
211 auto input =
212 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
213 inputTensorInfo);
214
215 armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
216 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
217 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
218 }
219
Sadik Armagan32ca1442020-11-13 17:51:56 +0000220 ARMNN_ASSERT(layer != nullptr);
221
222 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
223 outputSlot.SetTensorInfo(outputTensorInfo);
224
225 Connect(layer, tfLiteNode, delegateData);
226
Sadik Armagan32ca1442020-11-13 17:51:56 +0000227 if (!tfLiteNodeParameters)
228 {
229 // No Activation
230 return kTfLiteOk;
231 }
Ryan OShea3ad2e142023-01-13 10:19:20 +0000232 // Check and Create activation
Sadik Armagan32ca1442020-11-13 17:51:56 +0000233 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
234
235}
236
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
// Translates a TfLite CONV_3D node into an ArmNN Convolution3d layer, or — when
// delegateData.m_Network is null — only queries the backends for support.
// Returns kTfLiteOk on success/support, kTfLiteError otherwise.
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    // Cast builtin_data exactly once; it is reused for the fused-activation handling
    // further down (the original performed this reinterpret_cast twice).
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    // Validate input/output/filter tensors (the overload used here logs on failure).
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (params)
    {
        activationType = params->activation;
        // The activation is validated with the conv output info used for both its
        // input and output, since the fused activation runs in-place on the output.
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if(activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        // A 1-element placeholder keeps the support-check signature satisfied when
        // there is no bias operand.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
    // Fix: assert before the first dereference (the original dereferenced the
    // pointer in SetBackendId before asserting it was non-null).
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(biasTensorInfo.IsConstant())
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor,
                                            biasTensorInfo);

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that this is also allocated to an input slot
    if(inputTensorInfo.IsConstant())
    {
        auto input =
            CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
                              inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    if (!params)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif
423
Sadik Armagan32ca1442020-11-13 17:51:56 +0000424TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
425 TfLiteContext* tfLiteContext,
426 TfLiteNode* tfLiteNode,
427 int nodeIndex,
428 int32_t operatorCode)
429{
430 auto numInputs = tfLiteNode->inputs->size;
431 if (numInputs < 2)
432 {
433 TF_LITE_MAYBE_KERNEL_LOG(
434 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
435 2, numInputs, nodeIndex);
436 return kTfLiteError;
437 }
438 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
439
Mike Kelly84d63782022-05-06 12:14:16 +0100440 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000441
442 armnn::DepthwiseConvolution2dDescriptor descriptor;
443 const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
444
445 descriptor.m_BiasEnabled = biasEnabled;
446 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
447 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
448 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
449 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
450 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
451
452 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
453 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
454 if(!IsValid(&tfLiteInputTensor))
455 {
456 TF_LITE_MAYBE_KERNEL_LOG(
457 tfLiteContext,
458 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
459 operatorCode, nodeIndex);
460 return kTfLiteError;
461 }
462 if (IsDynamicTensor(tfLiteInputTensor))
463 {
464 TF_LITE_MAYBE_KERNEL_LOG(
465 tfLiteContext,
466 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
467 operatorCode, nodeIndex);
468 return kTfLiteError;
469 }
470 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
471 if(!IsValid(&tfLiteOutputTensor))
472 {
473 TF_LITE_MAYBE_KERNEL_LOG(
474 tfLiteContext,
475 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
476 operatorCode, nodeIndex);
477 return kTfLiteError;
478 }
479 if (IsDynamicTensor(tfLiteOutputTensor))
480 {
481 TF_LITE_MAYBE_KERNEL_LOG(
482 tfLiteContext,
483 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
484 operatorCode, nodeIndex);
485 return kTfLiteError;
486 }
487
488 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
489 if(!IsValid(&tfLiteFilterTensor))
490 {
491 TF_LITE_MAYBE_KERNEL_LOG(
492 tfLiteContext,
493 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
494 operatorCode, nodeIndex);
495 return kTfLiteError;
496 }
497 if (IsDynamicTensor(tfLiteFilterTensor))
498 {
499 TF_LITE_MAYBE_KERNEL_LOG(
500 tfLiteContext,
501 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
502 nodeIndex);
503 return kTfLiteError;
504 }
505
506 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100507 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000508
Ryan OShea3ad2e142023-01-13 10:19:20 +0000509 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +0000510 TfLiteFusedActivation activationType = kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000511 if (tfLiteNodeParameters)
512 {
513 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000514 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
515 outputTensorInfo, activationType);
516 if(activationStatus != kTfLiteOk)
517 {
518 return kTfLiteError;
519 }
520
521 }
522
Ryan OShea4c231de2023-01-17 15:19:20 +0000523 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000524
525 // Assuming input is NHWC
526 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
527 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
528
529 // TensorflowLite weights come in the format [1, H, W, I * M]
530 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
531 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
532
Sadik Armagan32ca1442020-11-13 17:51:56 +0000533 // Calculate padding
534 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
535 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
536 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
537 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
538
539 armnn::TensorInfo biasTensorInfo;
540 if(biasEnabled)
541 {
542 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
543 if(!IsValid(&tfLiteBiasTensor))
544 {
545 TF_LITE_MAYBE_KERNEL_LOG(
546 tfLiteContext,
547 "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
548 operatorCode, nodeIndex);
549 return kTfLiteError;
550 }
551 if (IsDynamicTensor(tfLiteBiasTensor))
552 {
553 TF_LITE_MAYBE_KERNEL_LOG(
554 tfLiteContext,
555 "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
556 nodeIndex);
557 return kTfLiteError;
558 }
559 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
560 }
561 else
562 {
563 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
564 }
565
Cathal Corbett53837672022-09-01 11:34:37 +0100566 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000567 if (!delegateData.m_Network)
568 {
569 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000570 FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000571 tfLiteContext,
572 IsDepthwiseConvolutionSupported,
573 delegateData.m_Backends,
574 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100575 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000576 inputTensorInfo,
577 outputTensorInfo,
578 descriptor,
Sadik Armagan90a119b2022-08-05 16:12:49 +0100579 filterTensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000580 armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
581 return isSupported ? kTfLiteOk : kTfLiteError;
582 }
583
Cathal Corbett06902652022-04-14 17:55:11 +0100584 armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100585 layer->SetBackendId(setBackend);
Narumol Prangnawarat16725422020-11-20 16:17:48 +0000586
Ryan OShea4c231de2023-01-17 15:19:20 +0000587 if(filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100588 {
589 // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
590 auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
591
592 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
593 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
594 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
595 }
Cathal Corbett06902652022-04-14 17:55:11 +0100596
597 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000598 {
Cathal Corbett06902652022-04-14 17:55:11 +0100599 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000600 if(biasTensorInfo.IsConstant())
Cathal Corbett06902652022-04-14 17:55:11 +0100601 {
602 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
603 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
604 ARMNN_ASSERT(biasLayer != nullptr);
605 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
606 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
607 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000608 }
609
Ryan OShea4c231de2023-01-17 15:19:20 +0000610 // The data input can also be constant, so we must check that this is also allocated to an input slot
611 if(inputTensorInfo.IsConstant())
612 {
613 auto input =
614 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]],
615 inputTensorInfo);
616
617 armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
618 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
619 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
620 }
621
Sadik Armagan32ca1442020-11-13 17:51:56 +0000622 ARMNN_ASSERT(layer != nullptr);
623
624 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
625 outputSlot.SetTensorInfo(outputTensorInfo);
626
627 Connect(layer, tfLiteNode, delegateData);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000628 if (!tfLiteNodeParameters)
629 {
630 // No Activation
631 return kTfLiteOk;
632 }
Ryan OShea3ad2e142023-01-13 10:19:20 +0000633 // Check and create activation
Sadik Armagan32ca1442020-11-13 17:51:56 +0000634 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
635}
636
637TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
638 TfLiteContext* tfLiteContext,
639 TfLiteNode* tfLiteNode,
640 int nodeIndex,
641 int32_t operatorCode)
642{
643 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
644 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
645
646 armnn::TransposeConvolution2dDescriptor descriptor;
647 auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
648 descriptor.m_BiasEnabled = false;
649 descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
650 descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
651 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
652
653 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
654 const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
655 if(!IsValid(&tfLiteOutputShapeTensor))
656 {
657 TF_LITE_MAYBE_KERNEL_LOG(
658 tfLiteContext,
659 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
660 operatorCode, nodeIndex);
661 return kTfLiteError;
662 }
663 if (IsDynamicTensor(tfLiteOutputShapeTensor))
664 {
665 TF_LITE_MAYBE_KERNEL_LOG(
666 tfLiteContext,
667 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
668 operatorCode, nodeIndex);
669 return kTfLiteError;
670 }
671
Ryan OShea4c231de2023-01-17 15:19:20 +0000672 const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
673 std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
674 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000675 {
Ryan OShea4c231de2023-01-17 15:19:20 +0000676 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000677 {
678 outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
679 }
680 }
681
Ryan OShea4c231de2023-01-17 15:19:20 +0000682 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000683 {
Ryan OShea4c231de2023-01-17 15:19:20 +0000684 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); i++)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000685 {
686 outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
687 }
688 }
689 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
690 for (int dimension : outputShape)
691 {
692 descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
693 }
694 descriptor.m_OutputShapeEnabled = true;
695
696 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
697 if(!IsValid(&tfLiteInputTensor))
698 {
699 TF_LITE_MAYBE_KERNEL_LOG(
700 tfLiteContext,
701 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
702 operatorCode, nodeIndex);
703 return kTfLiteError;
704 }
705 if (IsDynamicTensor(tfLiteInputTensor))
706 {
707 TF_LITE_MAYBE_KERNEL_LOG(
708 tfLiteContext,
709 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
710 operatorCode, nodeIndex);
711 return kTfLiteError;
712 }
713
714 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
715 if(!IsValid(&tfLiteOutputTensor))
716 {
717 TF_LITE_MAYBE_KERNEL_LOG(
718 tfLiteContext,
719 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
720 operatorCode, nodeIndex);
721 return kTfLiteError;
722 }
723 if (IsDynamicTensor(tfLiteOutputTensor))
724 {
725 TF_LITE_MAYBE_KERNEL_LOG(
726 tfLiteContext,
727 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
728 operatorCode, nodeIndex);
729 return kTfLiteError;
730 }
731
732 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
733 if(!IsValid(&tfLiteFilterTensor))
734 {
735 TF_LITE_MAYBE_KERNEL_LOG(
736 tfLiteContext,
737 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
738 operatorCode, nodeIndex);
739 return kTfLiteError;
740 }
741 if (IsDynamicTensor(tfLiteFilterTensor))
742 {
743 TF_LITE_MAYBE_KERNEL_LOG(
744 tfLiteContext,
745 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
746 operatorCode, nodeIndex);
747 return kTfLiteError;
748 }
749
750 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100751 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Ryan OShea4c231de2023-01-17 15:19:20 +0000752 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000753
754 // TfLite uses NHWC tensors
755 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
756 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
757
758 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
759 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
760
761 // Calculate padding
762 CalcPadding(inputHeight,
763 filterHeight,
764 descriptor.m_StrideY,
765 1, // dilation y
766 descriptor.m_PadTop,
767 descriptor.m_PadBottom,
768 parameters->padding);
769 CalcPadding(inputWidth,
770 filterWidth,
771 descriptor.m_StrideX,
772 1, // dilation x
773 descriptor.m_PadLeft,
774 descriptor.m_PadRight,
775 parameters->padding);
776
777 // Set up filter
778 auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
Ryan OShea4c231de2023-01-17 15:19:20 +0000779 filterTensorInfo);
Cathal Corbett53837672022-09-01 11:34:37 +0100780 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000781 if (!delegateData.m_Network)
782 {
783 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000784 FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000785 tfLiteContext,
786 IsTransposeConvolution2dSupported,
787 delegateData.m_Backends,
788 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100789 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000790 inputTensorInfo,
791 outputTensorInfo,
792 descriptor,
793 filterTensorInfo,
794 armnn::EmptyOptional());
795 return isSupported ? kTfLiteOk : kTfLiteError;
796 }
797
798 armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
799 filterTensor,
800 armnn::EmptyOptional());
Cathal Corbett53837672022-09-01 11:34:37 +0100801 layer->SetBackendId(setBackend);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000802 ARMNN_ASSERT(layer != nullptr);
803
Ryan OShea4c231de2023-01-17 15:19:20 +0000804 // The data input can be constant, so we must check that this is allocated to an input slot
805 if(inputTensorInfo.IsConstant())
806 {
807 auto input =
808 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
809 inputTensorInfo);
810
811 armnn::IConnectableLayer *inputLayer = delegateData.m_Network->AddConstantLayer(input);
812 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
813 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
814 }
815
Sadik Armagan32ca1442020-11-13 17:51:56 +0000816 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
817 outputSlot.SetTensorInfo(outputTensorInfo);
818
819 // Connect
Keith Davis892fafe2020-11-26 17:40:35 +0000820 if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000821 {
Keith Davis892fafe2020-11-26 17:40:35 +0000822 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
823 Connect(layer->GetInputSlot(0));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000824 }
825
826 // Prepare output slots
827 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
828 {
829 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Keith Davis892fafe2020-11-26 17:40:35 +0000830 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
831 &outputSlot;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000832 }
833 return kTfLiteOk;
834}
835
Sadik Armagan62483be2020-10-23 17:14:43 +0100836TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
837 TfLiteContext* tfLiteContext,
838 TfLiteNode* tfLiteNode,
839 int nodeIndex,
840 int32_t operatorCode)
841{
Sadik Armagan32ca1442020-11-13 17:51:56 +0000842 switch(operatorCode)
843 {
844 case kTfLiteBuiltinConv2d:
845 return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
Matthew Sloyan81ec9942021-10-12 10:26:30 +0100846// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
847#if defined(ARMNN_POST_TFLITE_2_5)
848 case kTfLiteBuiltinConv3d:
849 return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
850#endif
Sadik Armagan32ca1442020-11-13 17:51:56 +0000851 case kTfLiteBuiltinDepthwiseConv2d:
852 return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
853 case kTfLiteBuiltinTransposeConv:
854 return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
855 default:
856 return kTfLiteError;
857 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100858}
859
860} // namespace armnnDelegate