//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"
#include "SharedFunctions.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/internal/tensor.h>

namespace armnnDelegate
{

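// Visit functions for the convolution family of TfLite operators: CONV_2D, CONV_3D,
// DEPTHWISE_CONV_2D and TRANSPOSE_CONV. Each visitor serves two phases: when
// delegateData.m_Network is a nullptr it only queries backend support for the operator;
// on the second call it adds the corresponding ArmNN layer to the network.
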
TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not satisfied (got %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

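    // Fetch the input, output and filter tensors, rejecting invalid or dynamic tensors;
    // dynamic tensors are not supported by this delegate.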
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

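    // If the operator carries a fused activation, check up front that the backend can
    // also run that activation on the convolution output.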
    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;

        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

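    // A null m_Network indicates the support-checking phase: query the backends and return
    // without creating any layers (see the equivalent comment in VisitConv3dOperator).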
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
                                   tfLiteContext,
                                   IsConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Set up filter and biases
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

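    // Constant weights (and biases below) are added as ConstantLayers connected to the
    // convolution's input slots 1 and 2; non-constant tensors are wired up later via Connect().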
    if (tflite::IsConstantTensor(&tfLiteFilterTensor))
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo,
                                        armnn::Optional<armnn::PermutationVector&>());

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not satisfied (got %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;

        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, InputChannels, OutputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator.
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    // Add constant layers for the weights and biases if the inputs are constant tensors;
    // these are connected to the Convolution3d layer as inputs.
    if (tflite::IsConstantTensor(&tfLiteFilterTensor))
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo,
                                        armnn::Optional<armnn::PermutationVector&>());

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor,
                                            biasTensorInfo,
                                            armnn::Optional<armnn::PermutationVector&>());

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif

TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not satisfied (got %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);

    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;

        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

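    // As in VisitConv2dOperator, a null m_Network means this call only checks backend support.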
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                   tfLiteContext,
                                   IsDepthwiseConvolutionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    if (tflite::IsConstantTensor(&tfLiteFilterTensor))
    {
        // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
        auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::TransposeConvolution2dDescriptor descriptor;
    auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
    descriptor.m_BiasEnabled = false;
    descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

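    // For TRANSPOSE_CONV the node inputs are: 0 = requested output shape, 1 = filter weights,
    // 2 = the actual input tensor.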
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
    std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
    if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
        }
    }

    if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
    for (int dimension : outputShape)
    {
        descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
    }
    descriptor.m_OutputShapeEnabled = true;

    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight,
                filterHeight,
                descriptor.m_StrideY,
                1, // dilation y
                descriptor.m_PadTop,
                descriptor.m_PadBottom,
                parameters->padding);
    CalcPadding(inputWidth,
                filterWidth,
                descriptor.m_StrideX,
                1, // dilation x
                descriptor.m_PadLeft,
                descriptor.m_PadRight,
                parameters->padding);

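    // Unlike the other convolution visitors, the filter is materialised here as a ConstTensor and
    // handed directly to AddTransposeConvolution2dLayer below rather than wired in via a ConstantLayer.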
    // Set up filter
    auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
                                          filterTensorInfo,
                                          armnn::Optional<armnn::PermutationVector&>());
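    // A null m_Network again indicates the support-checking phase.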
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
                                   tfLiteContext,
                                   IsTransposeConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::EmptyOptional());
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                             filterTensor,
                                                                                             armnn::EmptyOptional());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect the input tensor (node input 2) to the transpose convolution layer
    if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
                     Connect(layer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& slot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] = &slot;
    }
    return kTfLiteOk;
}

TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
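    // Dispatch to the operator-specific visitor based on the TfLite builtin operator code.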
    switch (operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
        case kTfLiteBuiltinConv3d:
            return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
#endif
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinTransposeConv:
            return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnDelegate