blob: cf0134ec1ff146acf119b946ccb6f0b7c7fe3f68 [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Ryan OShea3ad2e142023-01-13 10:19:20 +00002// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Matthew Sloyan11572322023-03-16 10:17:51 +00008#include <ClassicDelegateUtils.hpp>
9#include <SharedFunctions.hpp>
Sadik Armagan32ca1442020-11-13 17:51:56 +000010
Sadik Armagan62483be2020-10-23 17:14:43 +010011#include <tensorflow/lite/builtin_ops.h>
12#include <tensorflow/lite/c/builtin_op_data.h>
13#include <tensorflow/lite/c/common.h>
14#include <tensorflow/lite/minimal_logging.h>
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000015#include <tensorflow/lite/kernels/internal/tensor.h>
Sadik Armagan62483be2020-10-23 17:14:43 +010016
17namespace armnnDelegate
18{
19
Sadik Armagan32ca1442020-11-13 17:51:56 +000020TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
21 TfLiteContext* tfLiteContext,
22 TfLiteNode* tfLiteNode,
23 int nodeIndex,
24 int32_t operatorCode)
25{
26 auto numInputs = tfLiteNode->inputs->size;
27 if (numInputs < 2)
28 {
29 TF_LITE_MAYBE_KERNEL_LOG(
30 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
31 2, numInputs, nodeIndex);
32 return kTfLiteError;
33 }
34 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
35
36 armnn::Convolution2dDescriptor descriptor;
37 const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
38
Mike Kelly84d63782022-05-06 12:14:16 +010039 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +000040 descriptor.m_BiasEnabled = biasEnabled;
41 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
42 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
43 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
44 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
45 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
46
47 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
48 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010049 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000050 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000051 return kTfLiteError;
52 }
Matthew Sloyanc52190a2023-05-08 11:33:55 +010053
Sadik Armagan32ca1442020-11-13 17:51:56 +000054 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010055 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000056 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000057 return kTfLiteError;
58 }
59
60 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010061 if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000062 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000063 return kTfLiteError;
64 }
65
66 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +010067 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +000068
Ryan OShea3ad2e142023-01-13 10:19:20 +000069 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +000070 TfLiteFusedActivation activationType=kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +000071 if (tfLiteNodeParameters)
72 {
73 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +000074 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
75 outputTensorInfo, activationType);
76 if(activationStatus != kTfLiteOk)
77 {
78 return kTfLiteError;
79 }
80
81 }
82
Ryan OShea4c231de2023-01-17 15:19:20 +000083 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +000084
85 armnn::TensorInfo biasTensorInfo;
86 if(biasEnabled)
87 {
88 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010089 if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000090 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000091 return kTfLiteError;
92 }
93 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
94 }
95 else
96 {
97 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
98 }
99
100 armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
101
102 // TfLite uses NHWC tensors
103 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
104 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
105
106 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
107 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
108
109 // Calculate padding
110 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
111 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
112 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
113 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
114
Cathal Corbett53837672022-09-01 11:34:37 +0100115 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000116 if (!delegateData.m_Network)
117 {
118 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000119 FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000120 tfLiteContext,
121 IsConvolution2dSupported,
122 delegateData.m_Backends,
123 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100124 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000125 inputTensorInfo,
126 outputTensorInfo,
127 descriptor,
128 filterTensorInfo,
129 optionalBiasInfo);
130 return isSupported ? kTfLiteOk : kTfLiteError;
131 }
132
Sadik Armagan32ca1442020-11-13 17:51:56 +0000133 // Set up filter and biases
Mike Kelly07169c82023-08-02 13:23:09 +0100134 auto layerName = GetLayerName(armnn::LayerType::Convolution2d, nodeIndex);
135 armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
Cathal Corbett53837672022-09-01 11:34:37 +0100136 layer->SetBackendId(setBackend);
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100137
Mike Kelly07169c82023-08-02 13:23:09 +0100138 if (filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100139 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100140 auto filter = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]], filterTensorInfo);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000141
Mike Kelly07169c82023-08-02 13:23:09 +0100142 auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
143 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
Sadik Armagan90a119b2022-08-05 16:12:49 +0100144 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
145 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
146 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100147
148 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000149 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100150 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000151 if(biasTensorInfo.IsConstant())
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100152 {
153 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
Mike Kelly07169c82023-08-02 13:23:09 +0100154
155 auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
156 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
157 biasName.c_str());
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100158 ARMNN_ASSERT(biasLayer != nullptr);
159 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
160 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
161 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000162 }
163
Ryan OShea4c231de2023-01-17 15:19:20 +0000164 // The data input can also be constant, so we must check that this is also allocated to an input slot
Mike Kelly07169c82023-08-02 13:23:09 +0100165 if (inputTensorInfo.IsConstant())
Ryan OShea4c231de2023-01-17 15:19:20 +0000166 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100167 auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
Ryan OShea4c231de2023-01-17 15:19:20 +0000168
Mike Kelly07169c82023-08-02 13:23:09 +0100169 auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
170 armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
Ryan OShea4c231de2023-01-17 15:19:20 +0000171 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
172 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
173 }
174
Sadik Armagan32ca1442020-11-13 17:51:56 +0000175 ARMNN_ASSERT(layer != nullptr);
176
177 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
178 outputSlot.SetTensorInfo(outputTensorInfo);
179
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000180 if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
181 {
182 return kTfLiteError;
183 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000184
Sadik Armagan32ca1442020-11-13 17:51:56 +0000185 if (!tfLiteNodeParameters)
186 {
187 // No Activation
188 return kTfLiteOk;
189 }
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100190
Ryan OShea3ad2e142023-01-13 10:19:20 +0000191 // Check and Create activation
Mike Kelly07169c82023-08-02 13:23:09 +0100192 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000193}
194
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
// Validates a TfLite CONV_3D node and, when delegateData.m_Network is non-null, adds the
// corresponding armnn::Convolution3d layer (plus constant layers for constant filter/bias/input
// tensors). When m_Network is null this only performs the backend support check.
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    // CONV_3D has two mandatory inputs (data, filter); the bias (input 2) is optional.
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    // Validate the input, output and filter tensors before reading their shapes.
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Validate any fused activation against the conv output shape (activation input == output here).
    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType=kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if(activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }

    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        // Placeholder bias info used only for the backend support query.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
    layer->SetBackendId(setBackend);
    ARMNN_ASSERT(layer != nullptr);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

        auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(biasTensorInfo.IsConstant())
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);

            auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases, biasName.c_str());
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that this is also allocated to an input slot
    if(inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);

        auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
#endif
384
Sadik Armagan32ca1442020-11-13 17:51:56 +0000385TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
386 TfLiteContext* tfLiteContext,
387 TfLiteNode* tfLiteNode,
388 int nodeIndex,
389 int32_t operatorCode)
390{
391 auto numInputs = tfLiteNode->inputs->size;
392 if (numInputs < 2)
393 {
394 TF_LITE_MAYBE_KERNEL_LOG(
395 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
396 2, numInputs, nodeIndex);
397 return kTfLiteError;
398 }
399 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
400
Mike Kelly84d63782022-05-06 12:14:16 +0100401 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000402
403 armnn::DepthwiseConvolution2dDescriptor descriptor;
404 const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
405
406 descriptor.m_BiasEnabled = biasEnabled;
407 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
408 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
409 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
410 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
411 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
412
413 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
414 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100415 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000416 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000417 return kTfLiteError;
418 }
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100419
Sadik Armagan32ca1442020-11-13 17:51:56 +0000420 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100421 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000422 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000423 return kTfLiteError;
424 }
425
426 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100427 if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000428 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000429 return kTfLiteError;
430 }
431
432 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100433 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000434
Ryan OShea3ad2e142023-01-13 10:19:20 +0000435 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +0000436 TfLiteFusedActivation activationType = kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000437 if (tfLiteNodeParameters)
438 {
439 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000440 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
441 outputTensorInfo, activationType);
442 if(activationStatus != kTfLiteOk)
443 {
444 return kTfLiteError;
445 }
446
447 }
448
Ryan OShea4c231de2023-01-17 15:19:20 +0000449 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000450
451 // Assuming input is NHWC
452 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
453 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
454
455 // TensorflowLite weights come in the format [1, H, W, I * M]
456 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
457 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
458
Sadik Armagan32ca1442020-11-13 17:51:56 +0000459 // Calculate padding
460 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
461 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
462 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
463 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
464
465 armnn::TensorInfo biasTensorInfo;
466 if(biasEnabled)
467 {
468 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100469 if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000470 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000471 return kTfLiteError;
472 }
473 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
474 }
475 else
476 {
477 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
478 }
479
Cathal Corbett53837672022-09-01 11:34:37 +0100480 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000481 if (!delegateData.m_Network)
482 {
483 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000484 FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000485 tfLiteContext,
486 IsDepthwiseConvolutionSupported,
487 delegateData.m_Backends,
488 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100489 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000490 inputTensorInfo,
491 outputTensorInfo,
492 descriptor,
Sadik Armagan90a119b2022-08-05 16:12:49 +0100493 filterTensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000494 armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
495 return isSupported ? kTfLiteOk : kTfLiteError;
496 }
497
Mike Kelly07169c82023-08-02 13:23:09 +0100498 auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
499 armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
500 layerName.c_str());
Cathal Corbett53837672022-09-01 11:34:37 +0100501 layer->SetBackendId(setBackend);
Narumol Prangnawarat16725422020-11-20 16:17:48 +0000502
Ryan OShea4c231de2023-01-17 15:19:20 +0000503 if(filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100504 {
505 // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
506 auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
507
Mike Kelly07169c82023-08-02 13:23:09 +0100508 auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
509 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
Sadik Armagan90a119b2022-08-05 16:12:49 +0100510 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
511 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
512 }
Cathal Corbett06902652022-04-14 17:55:11 +0100513
514 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000515 {
Cathal Corbett06902652022-04-14 17:55:11 +0100516 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000517 if(biasTensorInfo.IsConstant())
Cathal Corbett06902652022-04-14 17:55:11 +0100518 {
519 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
Mike Kelly07169c82023-08-02 13:23:09 +0100520
521 auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
522 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
523 biasName.c_str());
Cathal Corbett06902652022-04-14 17:55:11 +0100524 ARMNN_ASSERT(biasLayer != nullptr);
525 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
526 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
527 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000528 }
529
Ryan OShea4c231de2023-01-17 15:19:20 +0000530 // The data input can also be constant, so we must check that this is also allocated to an input slot
531 if(inputTensorInfo.IsConstant())
532 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100533 auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
Ryan OShea4c231de2023-01-17 15:19:20 +0000534
Mike Kelly07169c82023-08-02 13:23:09 +0100535 auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
536 armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
Ryan OShea4c231de2023-01-17 15:19:20 +0000537 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
538 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
539 }
540
Sadik Armagan32ca1442020-11-13 17:51:56 +0000541 ARMNN_ASSERT(layer != nullptr);
542
543 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
544 outputSlot.SetTensorInfo(outputTensorInfo);
545
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000546 if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
547 {
548 return kTfLiteError;
549 }
550
Sadik Armagan32ca1442020-11-13 17:51:56 +0000551 if (!tfLiteNodeParameters)
552 {
553 // No Activation
554 return kTfLiteOk;
555 }
Ryan OShea3ad2e142023-01-13 10:19:20 +0000556 // Check and create activation
Mike Kelly07169c82023-08-02 13:23:09 +0100557 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000558}
559
560TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
561 TfLiteContext* tfLiteContext,
562 TfLiteNode* tfLiteNode,
563 int nodeIndex,
564 int32_t operatorCode)
565{
566 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
567 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
568
569 armnn::TransposeConvolution2dDescriptor descriptor;
570 auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
571 descriptor.m_BiasEnabled = false;
572 descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
573 descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
574 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
575
576 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
577 const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100578 if (!IsValid(tfLiteContext, tfLiteOutputShapeTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000579 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000580 return kTfLiteError;
581 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000582
583 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100584 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000585 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000586 return kTfLiteError;
587 }
588
589 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100590 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000591 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000592 return kTfLiteError;
593 }
594
595 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100596 if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000597 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000598 return kTfLiteError;
599 }
600
601 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100602 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Ryan OShea4c231de2023-01-17 15:19:20 +0000603 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000604
605 // TfLite uses NHWC tensors
606 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
607 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
608
609 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
610 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
611
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100612 // This block determines the output shape of the transpose convolution.
613 // If the output shape tensor is a constant, we can access the data at load time and set the shape of the layer.
614 // If this is not constant, we do not have access to the shape data, so we have to use infer output shape.
615 if (tflite::IsConstantTensor(&tfLiteOutputShapeTensor))
616 {
617 const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
618 std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
619 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
620 {
621 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); ++i)
622 {
623 outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
624 }
625 }
626
627 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
628 {
629 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); ++i)
630 {
631 outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
632 }
633 }
634 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
635 for (int dimension : outputShape)
636 {
637 descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
638 }
639 descriptor.m_OutputShapeEnabled = true;
640
641 // TfLite uses NHWC tensors
642 const unsigned int outputHeight = descriptor.m_OutputShape[1];
643 const unsigned int outputWidth = descriptor.m_OutputShape[2];
644
645 CalcPadding(inputHeight,
646 filterHeight,
647 descriptor.m_StrideY,
648 1, // DilationY
649 descriptor.m_PadTop,
650 descriptor.m_PadBottom,
651 parameters->padding,
652 outputHeight);
653
654 CalcPadding(inputWidth,
655 filterWidth,
656 descriptor.m_StrideX,
657 1, // DilationX
658 descriptor.m_PadLeft,
659 descriptor.m_PadRight,
660 parameters->padding,
661 outputWidth);
662 }
663 else
664 {
665 CalcPadding(inputHeight,
666 filterHeight,
667 descriptor.m_StrideY,
668 1, // DilationY
669 descriptor.m_PadTop,
670 descriptor.m_PadBottom,
671 parameters->padding);
672
673 CalcPadding(inputWidth,
674 filterWidth,
675 descriptor.m_StrideX,
676 1, // DilationX
677 descriptor.m_PadLeft,
678 descriptor.m_PadRight,
679 parameters->padding);
680 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000681
682 // Set up filter
683 auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
Ryan OShea4c231de2023-01-17 15:19:20 +0000684 filterTensorInfo);
Cathal Corbett53837672022-09-01 11:34:37 +0100685 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000686 if (!delegateData.m_Network)
687 {
688 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000689 FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000690 tfLiteContext,
691 IsTransposeConvolution2dSupported,
692 delegateData.m_Backends,
693 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100694 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000695 inputTensorInfo,
696 outputTensorInfo,
697 descriptor,
698 filterTensorInfo,
699 armnn::EmptyOptional());
700 return isSupported ? kTfLiteOk : kTfLiteError;
701 }
702
Mike Kelly07169c82023-08-02 13:23:09 +0100703 auto layerName = GetLayerName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000704 armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
705 filterTensor,
Mike Kelly07169c82023-08-02 13:23:09 +0100706 armnn::EmptyOptional(),
707 layerName.c_str());
Cathal Corbett53837672022-09-01 11:34:37 +0100708 layer->SetBackendId(setBackend);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000709 ARMNN_ASSERT(layer != nullptr);
710
Ryan OShea4c231de2023-01-17 15:19:20 +0000711 // The data input can be constant, so we must check that this is allocated to an input slot
712 if(inputTensorInfo.IsConstant())
713 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100714 auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]], inputTensorInfo);
Ryan OShea4c231de2023-01-17 15:19:20 +0000715
Mike Kelly07169c82023-08-02 13:23:09 +0100716 auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
717 armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
Ryan OShea4c231de2023-01-17 15:19:20 +0000718 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
719 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
720 }
721
Sadik Armagan32ca1442020-11-13 17:51:56 +0000722 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
723 outputSlot.SetTensorInfo(outputTensorInfo);
724
725 // Connect
Keith Davis892fafe2020-11-26 17:40:35 +0000726 if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000727 {
Keith Davis892fafe2020-11-26 17:40:35 +0000728 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
729 Connect(layer->GetInputSlot(0));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000730 }
731
732 // Prepare output slots
733 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
734 {
735 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Keith Davis892fafe2020-11-26 17:40:35 +0000736 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
737 &outputSlot;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000738 }
739 return kTfLiteOk;
740}
741
Sadik Armagan62483be2020-10-23 17:14:43 +0100742TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
743 TfLiteContext* tfLiteContext,
744 TfLiteNode* tfLiteNode,
745 int nodeIndex,
746 int32_t operatorCode)
747{
Sadik Armagan32ca1442020-11-13 17:51:56 +0000748 switch(operatorCode)
749 {
750 case kTfLiteBuiltinConv2d:
751 return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
Matthew Sloyan81ec9942021-10-12 10:26:30 +0100752// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
753#if defined(ARMNN_POST_TFLITE_2_5)
754 case kTfLiteBuiltinConv3d:
755 return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
756#endif
Sadik Armagan32ca1442020-11-13 17:51:56 +0000757 case kTfLiteBuiltinDepthwiseConv2d:
758 return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
759 case kTfLiteBuiltinTransposeConv:
760 return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
761 default:
762 return kTfLiteError;
763 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100764}
765
766} // namespace armnnDelegate