blob: 71ecd4c97ac4bee77fcc7361a71a9a058e38eacc [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Ryan OShea3ad2e142023-01-13 10:19:20 +00002// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Matthew Sloyan11572322023-03-16 10:17:51 +00008#include <ClassicDelegateUtils.hpp>
9#include <SharedFunctions.hpp>
Sadik Armagan32ca1442020-11-13 17:51:56 +000010
Sadik Armagan62483be2020-10-23 17:14:43 +010011#include <tensorflow/lite/builtin_ops.h>
12#include <tensorflow/lite/c/builtin_op_data.h>
13#include <tensorflow/lite/c/common.h>
14#include <tensorflow/lite/minimal_logging.h>
Francis Murtaghc4fb0dd2023-03-16 17:01:56 +000015#include <tensorflow/lite/kernels/internal/tensor.h>
Sadik Armagan62483be2020-10-23 17:14:43 +010016
17namespace armnnDelegate
18{
19
Sadik Armagan32ca1442020-11-13 17:51:56 +000020TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
21 TfLiteContext* tfLiteContext,
22 TfLiteNode* tfLiteNode,
23 int nodeIndex,
24 int32_t operatorCode)
25{
26 auto numInputs = tfLiteNode->inputs->size;
27 if (numInputs < 2)
28 {
29 TF_LITE_MAYBE_KERNEL_LOG(
30 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
31 2, numInputs, nodeIndex);
32 return kTfLiteError;
33 }
34 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
35
36 armnn::Convolution2dDescriptor descriptor;
37 const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
38
Mike Kelly84d63782022-05-06 12:14:16 +010039 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +000040 descriptor.m_BiasEnabled = biasEnabled;
41 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
42 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
43 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
44 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
45 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
46
47 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
48 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010049 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000050 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000051 return kTfLiteError;
52 }
Matthew Sloyanc52190a2023-05-08 11:33:55 +010053
Sadik Armagan32ca1442020-11-13 17:51:56 +000054 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010055 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000056 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000057 return kTfLiteError;
58 }
Sadik Armagan32ca1442020-11-13 17:51:56 +000059 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +010060 if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000061 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000062 return kTfLiteError;
63 }
64
65 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +010066 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +000067
Ryan OShea3ad2e142023-01-13 10:19:20 +000068 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +000069 TfLiteFusedActivation activationType=kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +000070 if (tfLiteNodeParameters)
71 {
72 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +000073 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
74 outputTensorInfo, activationType);
75 if(activationStatus != kTfLiteOk)
76 {
77 return kTfLiteError;
78 }
79
80 }
81
Ryan OShea4c231de2023-01-17 15:19:20 +000082 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +000083
84 armnn::TensorInfo biasTensorInfo;
85 if(biasEnabled)
86 {
87 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Mike Kelly080d45d2023-11-10 17:11:53 +000088
Matthew Sloyanc52190a2023-05-08 11:33:55 +010089 if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +000090 {
Sadik Armagan32ca1442020-11-13 17:51:56 +000091 return kTfLiteError;
92 }
93 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
94 }
95 else
96 {
97 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
98 }
99
100 armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
101
102 // TfLite uses NHWC tensors
103 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
104 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
105
106 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
107 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
108
109 // Calculate padding
110 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
111 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
112 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
113 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
114
Cathal Corbett53837672022-09-01 11:34:37 +0100115 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000116 if (!delegateData.m_Network)
117 {
Mike Kelly080d45d2023-11-10 17:11:53 +0000118 bool filterIsConst = filterTensorInfo.IsConstant();
119
120 if (!filterIsConst)
121 {
122 filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[1]);
123 }
124 armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
125 filterTensorInfoCopy.SetConstant(filterIsConst);
126 armnn::Optional<armnn::TensorInfo> optionalBiasInfoCopy(biasTensorInfo);
127
128 if (biasEnabled)
129 {
130 bool biasIsConst = biasTensorInfo.IsConstant();
131
132 if (!biasIsConst)
133 {
134 biasIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[2]);
135 }
136 optionalBiasInfoCopy.value().SetConstant(biasIsConst);
137 }
138
Sadik Armagan32ca1442020-11-13 17:51:56 +0000139 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000140 FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000141 tfLiteContext,
142 IsConvolution2dSupported,
143 delegateData.m_Backends,
144 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100145 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000146 inputTensorInfo,
147 outputTensorInfo,
148 descriptor,
Mike Kelly080d45d2023-11-10 17:11:53 +0000149 filterTensorInfoCopy,
150 optionalBiasInfoCopy);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000151 return isSupported ? kTfLiteOk : kTfLiteError;
152 }
153
Sadik Armagan32ca1442020-11-13 17:51:56 +0000154 // Set up filter and biases
Mike Kelly07169c82023-08-02 13:23:09 +0100155 auto layerName = GetLayerName(armnn::LayerType::Convolution2d, nodeIndex);
156 armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
Cathal Corbett53837672022-09-01 11:34:37 +0100157 layer->SetBackendId(setBackend);
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100158
Mike Kelly07169c82023-08-02 13:23:09 +0100159 if (filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100160 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100161 auto filter = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]], filterTensorInfo);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000162
Mike Kelly07169c82023-08-02 13:23:09 +0100163 auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
164 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
Sadik Armagan90a119b2022-08-05 16:12:49 +0100165 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
166 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
167 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100168
169 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000170 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100171 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000172 if(biasTensorInfo.IsConstant())
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100173 {
174 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
Mike Kelly07169c82023-08-02 13:23:09 +0100175
176 auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
177 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
178 biasName.c_str());
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100179 ARMNN_ASSERT(biasLayer != nullptr);
180 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
181 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
182 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000183 }
184
Ryan OShea4c231de2023-01-17 15:19:20 +0000185 // The data input can also be constant, so we must check that this is also allocated to an input slot
Mike Kelly07169c82023-08-02 13:23:09 +0100186 if (inputTensorInfo.IsConstant())
Ryan OShea4c231de2023-01-17 15:19:20 +0000187 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100188 auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
Ryan OShea4c231de2023-01-17 15:19:20 +0000189
Mike Kelly07169c82023-08-02 13:23:09 +0100190 auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
191 armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
Ryan OShea4c231de2023-01-17 15:19:20 +0000192 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
193 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
194 }
195
Sadik Armagan32ca1442020-11-13 17:51:56 +0000196 ARMNN_ASSERT(layer != nullptr);
197
198 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
199 outputSlot.SetTensorInfo(outputTensorInfo);
200
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000201 if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
202 {
203 return kTfLiteError;
204 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000205
Sadik Armagan32ca1442020-11-13 17:51:56 +0000206 if (!tfLiteNodeParameters)
207 {
208 // No Activation
209 return kTfLiteOk;
210 }
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100211
Ryan OShea3ad2e142023-01-13 10:19:20 +0000212 // Check and Create activation
Mike Kelly07169c82023-08-02 13:23:09 +0100213 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000214}
215
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
/// Handles a TfLite CONV_3D node.
///
/// When delegateData.m_Network is null this only performs the backend
/// support-check and returns kTfLiteOk if any backend supports the layer.
/// Otherwise it adds a Convolution3d layer (plus constant layers for any
/// constant weights/bias/input and an optional fused activation) to the
/// ArmNN network.
///
/// @param delegateData  Holds the network being built and candidate backends.
/// @param tfLiteContext TfLite context, used for tensor access and logging.
/// @param tfLiteNode    The CONV_3D node being translated.
/// @param nodeIndex     Index of the node, used for layer naming and logging.
/// @param operatorCode  TfLite builtin operator code, used in diagnostics.
/// @return kTfLiteOk on success/supported, kTfLiteError otherwise.
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    // The bias is an optional third input.
    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        // No bias input: use a placeholder 1-element TensorInfo matching the input data type.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr); // Assert before first use of the pointer.
    layer->SetBackendId(setBackend);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

        auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (biasTensorInfo.IsConstant())
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);

            auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases, biasName.c_str());
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that this is also allocated to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);

        auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}
#endif
405
Sadik Armagan32ca1442020-11-13 17:51:56 +0000406TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
407 TfLiteContext* tfLiteContext,
408 TfLiteNode* tfLiteNode,
409 int nodeIndex,
410 int32_t operatorCode)
411{
412 auto numInputs = tfLiteNode->inputs->size;
413 if (numInputs < 2)
414 {
415 TF_LITE_MAYBE_KERNEL_LOG(
416 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
417 2, numInputs, nodeIndex);
418 return kTfLiteError;
419 }
420 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
421
Mike Kelly84d63782022-05-06 12:14:16 +0100422 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000423
424 armnn::DepthwiseConvolution2dDescriptor descriptor;
425 const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
426
427 descriptor.m_BiasEnabled = biasEnabled;
428 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
429 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
430 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
431 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
432 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
433
434 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
435 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100436 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000437 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000438 return kTfLiteError;
439 }
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100440
Sadik Armagan32ca1442020-11-13 17:51:56 +0000441 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100442 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000443 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000444 return kTfLiteError;
445 }
446
447 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100448 if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000449 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000450 return kTfLiteError;
451 }
452
453 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100454 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000455
Ryan OShea3ad2e142023-01-13 10:19:20 +0000456 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams *>(tfLiteNode->builtin_data);
Ryan OShea475c7a82023-01-30 14:24:15 +0000457 TfLiteFusedActivation activationType = kTfLiteActNone;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000458 if (tfLiteNodeParameters)
459 {
460 activationType = tfLiteNodeParameters->activation;
Ryan OShea3ad2e142023-01-13 10:19:20 +0000461 TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
462 outputTensorInfo, activationType);
463 if(activationStatus != kTfLiteOk)
464 {
465 return kTfLiteError;
466 }
467
468 }
469
Ryan OShea4c231de2023-01-17 15:19:20 +0000470 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000471
472 // Assuming input is NHWC
473 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
474 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
475
476 // TensorflowLite weights come in the format [1, H, W, I * M]
477 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
478 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
479
Sadik Armagan32ca1442020-11-13 17:51:56 +0000480 // Calculate padding
481 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
482 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
483 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
484 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
485
486 armnn::TensorInfo biasTensorInfo;
487 if(biasEnabled)
488 {
489 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100490 if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000491 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000492 return kTfLiteError;
493 }
494 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
495 }
496 else
497 {
498 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
499 }
500
Cathal Corbett53837672022-09-01 11:34:37 +0100501 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000502 if (!delegateData.m_Network)
503 {
Mike Kelly080d45d2023-11-10 17:11:53 +0000504 bool filterIsConst = filterTensorInfo.IsConstant();
505
506 if (!filterIsConst)
507 {
508 filterIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[1]);
509 }
510 armnn::TensorInfo filterTensorInfoCopy(filterTensorInfo);
511 filterTensorInfoCopy.SetConstant(filterIsConst);
512
513 armnn::Optional<armnn::TensorInfo> optionalBiasInfoCopy(biasTensorInfo);
514
515 if (biasEnabled)
516 {
517 bool biasIsConst = biasTensorInfo.IsConstant();
518
519 if (!biasIsConst)
520 {
521 biasIsConst = WillInputBeOptimizedToConst(tfLiteContext, tfLiteNode->inputs->data[2]);
522 }
523 optionalBiasInfoCopy.value().SetConstant(biasIsConst);
524 }
525
Sadik Armagan32ca1442020-11-13 17:51:56 +0000526 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000527 FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000528 tfLiteContext,
529 IsDepthwiseConvolutionSupported,
530 delegateData.m_Backends,
531 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100532 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000533 inputTensorInfo,
534 outputTensorInfo,
535 descriptor,
Mike Kelly080d45d2023-11-10 17:11:53 +0000536 filterTensorInfoCopy,
537 optionalBiasInfoCopy);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000538 return isSupported ? kTfLiteOk : kTfLiteError;
539 }
540
Mike Kelly07169c82023-08-02 13:23:09 +0100541 auto layerName = GetLayerName(armnn::LayerType::Convolution3d, nodeIndex);
542 armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
543 layerName.c_str());
Cathal Corbett53837672022-09-01 11:34:37 +0100544 layer->SetBackendId(setBackend);
Narumol Prangnawarat16725422020-11-20 16:17:48 +0000545
Ryan OShea4c231de2023-01-17 15:19:20 +0000546 if(filterTensorInfo.IsConstant())
Sadik Armagan90a119b2022-08-05 16:12:49 +0100547 {
548 // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
549 auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
550
Mike Kelly07169c82023-08-02 13:23:09 +0100551 auto filterName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Filter");
552 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
Sadik Armagan90a119b2022-08-05 16:12:49 +0100553 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
554 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
555 }
Cathal Corbett06902652022-04-14 17:55:11 +0100556
557 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000558 {
Cathal Corbett06902652022-04-14 17:55:11 +0100559 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Ryan OShea4c231de2023-01-17 15:19:20 +0000560 if(biasTensorInfo.IsConstant())
Cathal Corbett06902652022-04-14 17:55:11 +0100561 {
562 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
Mike Kelly07169c82023-08-02 13:23:09 +0100563
564 auto biasName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Bias");
565 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
566 biasName.c_str());
Cathal Corbett06902652022-04-14 17:55:11 +0100567 ARMNN_ASSERT(biasLayer != nullptr);
568 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
569 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
570 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000571 }
572
Ryan OShea4c231de2023-01-17 15:19:20 +0000573 // The data input can also be constant, so we must check that this is also allocated to an input slot
574 if(inputTensorInfo.IsConstant())
575 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100576 auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[0]], inputTensorInfo);
Ryan OShea4c231de2023-01-17 15:19:20 +0000577
Mike Kelly07169c82023-08-02 13:23:09 +0100578 auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
579 armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
Ryan OShea4c231de2023-01-17 15:19:20 +0000580 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
581 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
582 }
583
Sadik Armagan32ca1442020-11-13 17:51:56 +0000584 ARMNN_ASSERT(layer != nullptr);
585
586 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
587 outputSlot.SetTensorInfo(outputTensorInfo);
588
Ryan OSheaa544f0f2023-01-25 18:10:20 +0000589 if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
590 {
591 return kTfLiteError;
592 }
593
Sadik Armagan32ca1442020-11-13 17:51:56 +0000594 if (!tfLiteNodeParameters)
595 {
596 // No Activation
597 return kTfLiteOk;
598 }
Ryan OShea3ad2e142023-01-13 10:19:20 +0000599 // Check and create activation
Mike Kelly07169c82023-08-02 13:23:09 +0100600 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000601}
602
603TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
604 TfLiteContext* tfLiteContext,
605 TfLiteNode* tfLiteNode,
606 int nodeIndex,
607 int32_t operatorCode)
608{
609 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
610 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
611
612 armnn::TransposeConvolution2dDescriptor descriptor;
613 auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
614 descriptor.m_BiasEnabled = false;
615 descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
616 descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
617 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
618
619 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
620 const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100621 if (!IsValid(tfLiteContext, tfLiteOutputShapeTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000622 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000623 return kTfLiteError;
624 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000625
626 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100627 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000628 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000629 return kTfLiteError;
630 }
631
632 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100633 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000634 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000635 return kTfLiteError;
636 }
637
638 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100639 if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
Sadik Armagan32ca1442020-11-13 17:51:56 +0000640 {
Sadik Armagan32ca1442020-11-13 17:51:56 +0000641 return kTfLiteError;
642 }
643
644 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100645 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Ryan OShea4c231de2023-01-17 15:19:20 +0000646 const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000647
648 // TfLite uses NHWC tensors
649 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
650 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
651
652 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
653 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
654
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100655 // This block determines the output shape of the transpose convolution.
656 // If the output shape tensor is a constant, we can access the data at load time and set the shape of the layer.
657 // If this is not constant, we do not have access to the shape data, so we have to use infer output shape.
658 if (tflite::IsConstantTensor(&tfLiteOutputShapeTensor))
659 {
660 const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
661 std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
662 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
663 {
664 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); ++i)
665 {
666 outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
667 }
668 }
669
670 if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
671 {
672 for(unsigned int i=0; i < outputShapeTensorInfo.GetNumElements(); ++i)
673 {
674 outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
675 }
676 }
677 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
678 for (int dimension : outputShape)
679 {
680 descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
681 }
682 descriptor.m_OutputShapeEnabled = true;
683
684 // TfLite uses NHWC tensors
685 const unsigned int outputHeight = descriptor.m_OutputShape[1];
686 const unsigned int outputWidth = descriptor.m_OutputShape[2];
687
688 CalcPadding(inputHeight,
689 filterHeight,
690 descriptor.m_StrideY,
691 1, // DilationY
692 descriptor.m_PadTop,
693 descriptor.m_PadBottom,
694 parameters->padding,
695 outputHeight);
696
697 CalcPadding(inputWidth,
698 filterWidth,
699 descriptor.m_StrideX,
700 1, // DilationX
701 descriptor.m_PadLeft,
702 descriptor.m_PadRight,
703 parameters->padding,
704 outputWidth);
705 }
706 else
707 {
708 CalcPadding(inputHeight,
709 filterHeight,
710 descriptor.m_StrideY,
711 1, // DilationY
712 descriptor.m_PadTop,
713 descriptor.m_PadBottom,
714 parameters->padding);
715
716 CalcPadding(inputWidth,
717 filterWidth,
718 descriptor.m_StrideX,
719 1, // DilationX
720 descriptor.m_PadLeft,
721 descriptor.m_PadRight,
722 parameters->padding);
723 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000724
725 // Set up filter
726 auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
Ryan OShea4c231de2023-01-17 15:19:20 +0000727 filterTensorInfo);
Cathal Corbett53837672022-09-01 11:34:37 +0100728 armnn::BackendId setBackend;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000729 if (!delegateData.m_Network)
730 {
731 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000732 FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000733 tfLiteContext,
734 IsTransposeConvolution2dSupported,
735 delegateData.m_Backends,
736 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100737 setBackend,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000738 inputTensorInfo,
739 outputTensorInfo,
740 descriptor,
741 filterTensorInfo,
742 armnn::EmptyOptional());
743 return isSupported ? kTfLiteOk : kTfLiteError;
744 }
745
Mike Kelly07169c82023-08-02 13:23:09 +0100746 auto layerName = GetLayerName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000747 armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
748 filterTensor,
Mike Kelly07169c82023-08-02 13:23:09 +0100749 armnn::EmptyOptional(),
750 layerName.c_str());
Cathal Corbett53837672022-09-01 11:34:37 +0100751 layer->SetBackendId(setBackend);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000752 ARMNN_ASSERT(layer != nullptr);
753
Ryan OShea4c231de2023-01-17 15:19:20 +0000754 // The data input can be constant, so we must check that this is allocated to an input slot
755 if(inputTensorInfo.IsConstant())
756 {
Matthew Sloyanc52190a2023-05-08 11:33:55 +0100757 auto input = CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]], inputTensorInfo);
Ryan OShea4c231de2023-01-17 15:19:20 +0000758
Mike Kelly07169c82023-08-02 13:23:09 +0100759 auto inputName = GetLayerName(armnn::LayerType::Constant, nodeIndex, "Input");
760 armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
Ryan OShea4c231de2023-01-17 15:19:20 +0000761 inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
762 inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
763 }
764
Sadik Armagan32ca1442020-11-13 17:51:56 +0000765 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
766 outputSlot.SetTensorInfo(outputTensorInfo);
767
768 // Connect
Keith Davis892fafe2020-11-26 17:40:35 +0000769 if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000770 {
Keith Davis892fafe2020-11-26 17:40:35 +0000771 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
772 Connect(layer->GetInputSlot(0));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000773 }
774
775 // Prepare output slots
776 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
777 {
778 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Keith Davis892fafe2020-11-26 17:40:35 +0000779 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
780 &outputSlot;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000781 }
782 return kTfLiteOk;
783}
784
Sadik Armagan62483be2020-10-23 17:14:43 +0100785TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
786 TfLiteContext* tfLiteContext,
787 TfLiteNode* tfLiteNode,
788 int nodeIndex,
789 int32_t operatorCode)
790{
Sadik Armagan32ca1442020-11-13 17:51:56 +0000791 switch(operatorCode)
792 {
793 case kTfLiteBuiltinConv2d:
794 return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
Matthew Sloyan81ec9942021-10-12 10:26:30 +0100795// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
796#if defined(ARMNN_POST_TFLITE_2_5)
797 case kTfLiteBuiltinConv3d:
798 return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
799#endif
Sadik Armagan32ca1442020-11-13 17:51:56 +0000800 case kTfLiteBuiltinDepthwiseConv2d:
801 return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
802 case kTfLiteBuiltinTransposeConv:
803 return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
804 default:
805 return kTfLiteError;
806 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100807}
808
809} // namespace armnnDelegate