//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include "tensorflow/lite/kernels/internal/tensor.h"

namespace armnnDelegate
{

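// Each VisitXxxOperator function below is called in two phases. In the first phase
// delegateData.m_Network is null and the function only queries backend support for the operator
// via FORWARD_LAYER_SUPPORT_FUNC; in the second phase the ArmNN layer is created, its constant
// inputs are added and its slots are connected.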
TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

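    // Validate the input, output and filter tensors up front: each must resolve to a valid,
    // non-dynamic TfLite tensor, otherwise the node cannot be delegated to ArmNN.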
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if(!IsValid(&tfLiteTensors[tfLiteNode->inputs->data[0]]))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if(!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if(!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

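    // CalcPadding (see DelegateUtils.hpp) converts the TfLite padding scheme into explicit pad
    // values: kTfLitePaddingSame pads each dimension so the output keeps ceil(inputSize / stride)
    // elements, taking dilation into account, while kTfLitePaddingValid adds no padding.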
    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
                                   tfLiteContext,
                                   IsConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

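    // Build phase: the weights (and the optional bias) are added to the graph as ConstantLayers
    // and connected to input slots 1 and 2 of the Convolution2d layer rather than being held in
    // the descriptor.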
    // Set up filter and biases
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);

    auto filter =
        CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
                          filterTensorInfo,
                          armnn::Optional<armnn::PermutationVector&>());

    armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
    weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check activation
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);

}

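// VisitConv3dOperator follows the same two-phase pattern as the 2D path, but fills a
// Convolution3dDescriptor with NDHWC data layout and an additional depth stride/dilation.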
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, InputChannels, OutputChannels
    const unsigned int filterDepth = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator.
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (tflite::IsConstantTensor(&tfLiteFilterTensor))
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo,
                                        armnn::Optional<armnn::PermutationVector&>());

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor,
                                            biasTensorInfo,
                                            armnn::Optional<armnn::PermutationVector&>());

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check activation
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif

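// VisitDepthwiseConv2dOperator handles DEPTHWISE_CONV_2D. TfLite stores depthwise weights as
// [1, H, W, I * M] (input channels times depth multiplier), which matches the layout ArmNN
// expects, so the filter is passed through without permutation.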
TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);

    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if(!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if(!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if(!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

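    // When the model carries no bias, a dummy one-element TensorInfo with the input's data type is
    // created so the optional bias argument can still be passed to the backend support query.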
    armnn::TensorInfo biasTensorInfo;
    if(biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
    auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                   tfLiteContext,
                                   IsDepthwiseConvolutionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filter.GetInfo(),
                                   armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);

    armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
    weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
    weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if(tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);
    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check activation
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

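// VisitTransposeConv2dOperator handles TRANSPOSE_CONV. The TfLite operator takes three inputs:
// input 0 is a constant 1-D tensor holding the requested output shape, input 1 is the filter and
// input 2 is the feature-map input; the output shape values are copied into descriptor.m_OutputShape.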
TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::TransposeConvolution2dDescriptor descriptor;
    auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
    descriptor.m_BiasEnabled = false;
    descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if(!IsValid(&tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

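    // Read the requested output shape out of the constant shape tensor. It may be stored as
    // Signed32 or QAsymmU8 data, so both representations are handled below.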
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
    std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
    if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
    {
        for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
        {
            outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
        }
    }

    if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
    {
        for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
        {
            outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
    for (int dimension : outputShape)
    {
        descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
    }
    descriptor.m_OutputShapeEnabled = true;

    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
    if(!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if(!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if(!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight,
                filterHeight,
                descriptor.m_StrideY,
                1, // dilation y
                descriptor.m_PadTop,
                descriptor.m_PadBottom,
                parameters->padding);
    CalcPadding(inputWidth,
                filterWidth,
                descriptor.m_StrideX,
                1, // dilation x
                descriptor.m_PadLeft,
                descriptor.m_PadRight,
                parameters->padding);

    // Set up filter
    auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
                                          filterTensorInfo,
                                          armnn::Optional<armnn::PermutationVector&>());
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
                                   tfLiteContext,
                                   IsTransposeConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::EmptyOptional());
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                             filterTensor,
                                                                                             armnn::EmptyOptional());
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
    if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
            Connect(layer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
            &outputSlot;
    }
    return kTfLiteOk;
}

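// VisitConvolutionOperator dispatches to the specific handler based on the TfLite builtin
// operator code; unsupported codes fall through to kTfLiteError.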
TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
    switch(operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
        case kTfLiteBuiltinConv3d:
            return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
#endif
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinTransposeConv:
            return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnDelegate