blob: 93da4c8ce2e27bf8d99425f91a540a030418fdef [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
Sadik Armagan90a119b2022-08-05 16:12:49 +01002// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
Sadik Armagan62483be2020-10-23 17:14:43 +01003// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Sadik Armagan32ca1442020-11-13 17:51:56 +00008#include "DelegateUtils.hpp"
9
Sadik Armagan62483be2020-10-23 17:14:43 +010010#include <tensorflow/lite/builtin_ops.h>
11#include <tensorflow/lite/c/builtin_op_data.h>
12#include <tensorflow/lite/c/common.h>
13#include <tensorflow/lite/minimal_logging.h>
Sadik Armagan32ca1442020-11-13 17:51:56 +000014#include "tensorflow/lite/kernels/internal/tensor.h"
Sadik Armagan62483be2020-10-23 17:14:43 +010015
16namespace armnnDelegate
17{
18
Sadik Armagan32ca1442020-11-13 17:51:56 +000019TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
20 TfLiteContext* tfLiteContext,
21 TfLiteNode* tfLiteNode,
22 int nodeIndex,
23 int32_t operatorCode)
24{
25 auto numInputs = tfLiteNode->inputs->size;
26 if (numInputs < 2)
27 {
28 TF_LITE_MAYBE_KERNEL_LOG(
29 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
30 2, numInputs, nodeIndex);
31 return kTfLiteError;
32 }
33 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
34
35 armnn::Convolution2dDescriptor descriptor;
36 const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
37
Mike Kelly84d63782022-05-06 12:14:16 +010038 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +000039 descriptor.m_BiasEnabled = biasEnabled;
40 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
41 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
42 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
43 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
44 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
45
46 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
47 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
48 if(!IsValid(&tfLiteTensors[tfLiteNode->inputs->data[0]]))
49 {
50 TF_LITE_MAYBE_KERNEL_LOG(
51 tfLiteContext,
52 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
53 operatorCode, nodeIndex);
54 return kTfLiteError;
55 }
56 if (IsDynamicTensor(tfLiteInputTensor))
57 {
58 TF_LITE_MAYBE_KERNEL_LOG(
59 tfLiteContext,
60 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
61 operatorCode, nodeIndex);
62 return kTfLiteError;
63 }
64 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
65 if(!IsValid(&tfLiteOutputTensor))
66 {
67 TF_LITE_MAYBE_KERNEL_LOG(
68 tfLiteContext,
69 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
70 operatorCode, nodeIndex);
71 return kTfLiteError;
72 }
73 if (IsDynamicTensor(tfLiteOutputTensor))
74 {
75 TF_LITE_MAYBE_KERNEL_LOG(
76 tfLiteContext,
77 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
78 operatorCode, nodeIndex);
79 return kTfLiteError;
80 }
81
82 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
83 if(!IsValid(&tfLiteFilterTensor))
84 {
85 TF_LITE_MAYBE_KERNEL_LOG(
86 tfLiteContext,
87 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
88 operatorCode, nodeIndex);
89 return kTfLiteError;
90 }
91 if (IsDynamicTensor(tfLiteFilterTensor))
92 {
93 TF_LITE_MAYBE_KERNEL_LOG(
94 tfLiteContext,
95 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
96 nodeIndex);
97 return kTfLiteError;
98 }
99
100 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100101 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000102
103 armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
104
105 armnn::TensorInfo biasTensorInfo;
106 if(biasEnabled)
107 {
108 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
109 if(!IsValid(&tfLiteBiasTensor))
110 {
111 TF_LITE_MAYBE_KERNEL_LOG(
112 tfLiteContext,
113 "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
114 operatorCode, nodeIndex);
115 return kTfLiteError;
116 }
117 if (IsDynamicTensor(tfLiteBiasTensor))
118 {
119 TF_LITE_MAYBE_KERNEL_LOG(
120 tfLiteContext,
121 "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
122 nodeIndex);
123 return kTfLiteError;
124 }
125 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
126 }
127 else
128 {
129 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
130 }
131
132 armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);
133
134 // TfLite uses NHWC tensors
135 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
136 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
137
138 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
139 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
140
141 // Calculate padding
142 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
143 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
144 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
145 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
146
147 if (!delegateData.m_Network)
148 {
149 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000150 FORWARD_LAYER_SUPPORT_FUNC("CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000151 tfLiteContext,
152 IsConvolution2dSupported,
153 delegateData.m_Backends,
154 isSupported,
155 inputTensorInfo,
156 outputTensorInfo,
157 descriptor,
158 filterTensorInfo,
159 optionalBiasInfo);
160 return isSupported ? kTfLiteOk : kTfLiteError;
161 }
162
Sadik Armagan32ca1442020-11-13 17:51:56 +0000163 // Set up filter and biases
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100164 armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
165
Sadik Armagan90a119b2022-08-05 16:12:49 +0100166 if(tflite::IsConstantTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]]))
167 {
168 auto filter =
169 CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
170 filterTensorInfo,
171 armnn::Optional<armnn::PermutationVector &>());
Sadik Armagan32ca1442020-11-13 17:51:56 +0000172
Sadik Armagan90a119b2022-08-05 16:12:49 +0100173 armnn::IConnectableLayer *weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
174 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
175 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
176 }
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100177
178 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000179 {
Keith Davisb4dd5cc2022-04-07 11:32:00 +0100180 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
181 if(tflite::IsConstantTensor(&tfLiteBiasTensor))
182 {
183 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
184 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
185 ARMNN_ASSERT(biasLayer != nullptr);
186 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
187 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
188 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000189 }
190
191 ARMNN_ASSERT(layer != nullptr);
192
193 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
194 outputSlot.SetTensorInfo(outputTensorInfo);
195
196 Connect(layer, tfLiteNode, delegateData);
197
198 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
199 if (!tfLiteNodeParameters)
200 {
201 // No Activation
202 return kTfLiteOk;
203 }
204 // Check activation
205 TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
206 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
207
208}
209
// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
#if defined(ARMNN_POST_TFLITE_2_5)
/// Handles a TfLite CONV_3D node.
///
/// If the m_Network is a nullptr, this signals that a prerequisite TfLite
/// callback is required to clarify the support for the operator; only a backend
/// support query is performed. If supported, this function is called again to
/// add the Convolution3d layer (plus constant weight/bias layers) to the network.
///
/// @return kTfLiteOk on success/support, kTfLiteError otherwise.
TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConv3DParams*>(tfLiteNode->builtin_data);

    // The optional third input is the bias tensor.
    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC; // TfLite uses NDHWC for 3D convs
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        // Placeholder info; only consulted by backends when m_BiasEnabled is false.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors.
    const unsigned int inputDepth  = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth  = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO : Depth, Height, Width, OutputChannels, InputChannels
    const unsigned int filterDepth  = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth  = filterTensorInfo.GetShape()[2];

    // Calculate padding.
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator
    // If supported, VisitConvolutionOperator will be called again to add the layer to the network as seen below.
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC("CONV3D",
                                   tfLiteContext,
                                   IsConvolution3dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (tflite::IsConstantTensor(&tfLiteFilterTensor))
    {
        auto filter = CreateConstTensor(&tfLiteFilterTensor,
                                        filterTensorInfo,
                                        armnn::Optional<armnn::PermutationVector&>());

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (tflite::IsConstantTensor(&tfLiteBiasTensor))
        {
            auto biases = CreateConstTensor(&tfLiteBiasTensor,
                                            biasTensorInfo,
                                            armnn::Optional<armnn::PermutationVector&>());

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biases);
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    // params was already cast from builtin_data above; reuse it for the fused activation.
    if (!params)
    {
        // No activation.
        return kTfLiteOk;
    }

    // Check activation.
    TfLiteFusedActivation activationType = params->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}
#endif

Sadik Armagan32ca1442020-11-13 17:51:56 +0000372TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
373 TfLiteContext* tfLiteContext,
374 TfLiteNode* tfLiteNode,
375 int nodeIndex,
376 int32_t operatorCode)
377{
378 auto numInputs = tfLiteNode->inputs->size;
379 if (numInputs < 2)
380 {
381 TF_LITE_MAYBE_KERNEL_LOG(
382 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
383 2, numInputs, nodeIndex);
384 return kTfLiteError;
385 }
386 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
387
Mike Kelly84d63782022-05-06 12:14:16 +0100388 bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000389
390 armnn::DepthwiseConvolution2dDescriptor descriptor;
391 const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
392
393 descriptor.m_BiasEnabled = biasEnabled;
394 descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
395 descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
396 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
397 descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
398 descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
399
400 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
401 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
402 if(!IsValid(&tfLiteInputTensor))
403 {
404 TF_LITE_MAYBE_KERNEL_LOG(
405 tfLiteContext,
406 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
407 operatorCode, nodeIndex);
408 return kTfLiteError;
409 }
410 if (IsDynamicTensor(tfLiteInputTensor))
411 {
412 TF_LITE_MAYBE_KERNEL_LOG(
413 tfLiteContext,
414 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
415 operatorCode, nodeIndex);
416 return kTfLiteError;
417 }
418 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
419 if(!IsValid(&tfLiteOutputTensor))
420 {
421 TF_LITE_MAYBE_KERNEL_LOG(
422 tfLiteContext,
423 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
424 operatorCode, nodeIndex);
425 return kTfLiteError;
426 }
427 if (IsDynamicTensor(tfLiteOutputTensor))
428 {
429 TF_LITE_MAYBE_KERNEL_LOG(
430 tfLiteContext,
431 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
432 operatorCode, nodeIndex);
433 return kTfLiteError;
434 }
435
436 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
437 if(!IsValid(&tfLiteFilterTensor))
438 {
439 TF_LITE_MAYBE_KERNEL_LOG(
440 tfLiteContext,
441 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
442 operatorCode, nodeIndex);
443 return kTfLiteError;
444 }
445 if (IsDynamicTensor(tfLiteFilterTensor))
446 {
447 TF_LITE_MAYBE_KERNEL_LOG(
448 tfLiteContext,
449 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
450 nodeIndex);
451 return kTfLiteError;
452 }
453
454 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100455 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000456
Jan Eilers7612bd62021-04-06 17:29:03 +0100457 armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000458
459 // Assuming input is NHWC
460 unsigned int inputHeight = inputTensorInfo.GetShape()[1];
461 unsigned int inputWidth = inputTensorInfo.GetShape()[2];
462
463 // TensorflowLite weights come in the format [1, H, W, I * M]
464 unsigned int filterHeight = filterTensorInfo.GetShape()[1];
465 unsigned int filterWidth = filterTensorInfo.GetShape()[2];
466
Sadik Armagan32ca1442020-11-13 17:51:56 +0000467 // Calculate padding
468 CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
469 descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
470 CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
471 descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
472
473 armnn::TensorInfo biasTensorInfo;
474 if(biasEnabled)
475 {
476 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
477 if(!IsValid(&tfLiteBiasTensor))
478 {
479 TF_LITE_MAYBE_KERNEL_LOG(
480 tfLiteContext,
481 "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
482 operatorCode, nodeIndex);
483 return kTfLiteError;
484 }
485 if (IsDynamicTensor(tfLiteBiasTensor))
486 {
487 TF_LITE_MAYBE_KERNEL_LOG(
488 tfLiteContext,
489 "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
490 nodeIndex);
491 return kTfLiteError;
492 }
493 biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
494 }
495 else
496 {
497 biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
498 }
499
500 if (!delegateData.m_Network)
501 {
502 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000503 FORWARD_LAYER_SUPPORT_FUNC("DEPTHWISE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000504 tfLiteContext,
505 IsDepthwiseConvolutionSupported,
506 delegateData.m_Backends,
507 isSupported,
508 inputTensorInfo,
509 outputTensorInfo,
510 descriptor,
Sadik Armagan90a119b2022-08-05 16:12:49 +0100511 filterTensorInfo,
Sadik Armagan32ca1442020-11-13 17:51:56 +0000512 armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
513 return isSupported ? kTfLiteOk : kTfLiteError;
514 }
515
Cathal Corbett06902652022-04-14 17:55:11 +0100516 armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
Narumol Prangnawarat16725422020-11-20 16:17:48 +0000517
Sadik Armagan90a119b2022-08-05 16:12:49 +0100518 if(tflite::IsConstantTensor(&tfLiteFilterTensor))
519 {
520 // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
521 auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);
522
523 armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
524 weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
525 weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
526 }
Cathal Corbett06902652022-04-14 17:55:11 +0100527
528 if (biasEnabled)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000529 {
Cathal Corbett06902652022-04-14 17:55:11 +0100530 const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
531 if(tflite::IsConstantTensor(&tfLiteBiasTensor))
532 {
533 auto biasTensor = CreateConstTensor(&tfLiteBiasTensor, biasTensorInfo);
534 armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
535 ARMNN_ASSERT(biasLayer != nullptr);
536 biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
537 biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
538 }
Sadik Armagan32ca1442020-11-13 17:51:56 +0000539 }
540
541 ARMNN_ASSERT(layer != nullptr);
542
543 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
544 outputSlot.SetTensorInfo(outputTensorInfo);
545
546 Connect(layer, tfLiteNode, delegateData);
547 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
548 if (!tfLiteNodeParameters)
549 {
550 // No Activation
551 return kTfLiteOk;
552 }
553 // Check activation
554 TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
555 return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
556}
557
558TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
559 TfLiteContext* tfLiteContext,
560 TfLiteNode* tfLiteNode,
561 int nodeIndex,
562 int32_t operatorCode)
563{
564 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
565 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
566
567 armnn::TransposeConvolution2dDescriptor descriptor;
568 auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
569 descriptor.m_BiasEnabled = false;
570 descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
571 descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
572 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
573
574 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
575 const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
576 if(!IsValid(&tfLiteOutputShapeTensor))
577 {
578 TF_LITE_MAYBE_KERNEL_LOG(
579 tfLiteContext,
580 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
581 operatorCode, nodeIndex);
582 return kTfLiteError;
583 }
584 if (IsDynamicTensor(tfLiteOutputShapeTensor))
585 {
586 TF_LITE_MAYBE_KERNEL_LOG(
587 tfLiteContext,
588 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
589 operatorCode, nodeIndex);
590 return kTfLiteError;
591 }
592
593 armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
594 std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
595 if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
596 {
597 for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
598 {
599 outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
600 }
601 }
602
603 if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
604 {
605 for(unsigned int i=0; i < tensorInfo.GetNumElements(); i++)
606 {
607 outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
608 }
609 }
610 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
611 for (int dimension : outputShape)
612 {
613 descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
614 }
615 descriptor.m_OutputShapeEnabled = true;
616
617 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
618 if(!IsValid(&tfLiteInputTensor))
619 {
620 TF_LITE_MAYBE_KERNEL_LOG(
621 tfLiteContext,
622 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
623 operatorCode, nodeIndex);
624 return kTfLiteError;
625 }
626 if (IsDynamicTensor(tfLiteInputTensor))
627 {
628 TF_LITE_MAYBE_KERNEL_LOG(
629 tfLiteContext,
630 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
631 operatorCode, nodeIndex);
632 return kTfLiteError;
633 }
634
635 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
636 if(!IsValid(&tfLiteOutputTensor))
637 {
638 TF_LITE_MAYBE_KERNEL_LOG(
639 tfLiteContext,
640 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
641 operatorCode, nodeIndex);
642 return kTfLiteError;
643 }
644 if (IsDynamicTensor(tfLiteOutputTensor))
645 {
646 TF_LITE_MAYBE_KERNEL_LOG(
647 tfLiteContext,
648 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
649 operatorCode, nodeIndex);
650 return kTfLiteError;
651 }
652
653 const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
654 if(!IsValid(&tfLiteFilterTensor))
655 {
656 TF_LITE_MAYBE_KERNEL_LOG(
657 tfLiteContext,
658 "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
659 operatorCode, nodeIndex);
660 return kTfLiteError;
661 }
662 if (IsDynamicTensor(tfLiteFilterTensor))
663 {
664 TF_LITE_MAYBE_KERNEL_LOG(
665 tfLiteContext,
666 "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
667 operatorCode, nodeIndex);
668 return kTfLiteError;
669 }
670
671 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100672 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Sadik Armagan32ca1442020-11-13 17:51:56 +0000673 armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);
674
675 // TfLite uses NHWC tensors
676 const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
677 const unsigned int inputWidth = inputTensorInfo.GetShape()[2];
678
679 const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
680 const unsigned int filterWidth = filterTensorInfo.GetShape()[2];
681
682 // Calculate padding
683 CalcPadding(inputHeight,
684 filterHeight,
685 descriptor.m_StrideY,
686 1, // dilation y
687 descriptor.m_PadTop,
688 descriptor.m_PadBottom,
689 parameters->padding);
690 CalcPadding(inputWidth,
691 filterWidth,
692 descriptor.m_StrideX,
693 1, // dilation x
694 descriptor.m_PadLeft,
695 descriptor.m_PadRight,
696 parameters->padding);
697
698 // Set up filter
699 auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
700 filterTensorInfo,
701 armnn::Optional<armnn::PermutationVector&>());
702 if (!delegateData.m_Network)
703 {
704 bool isSupported = false;
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000705 FORWARD_LAYER_SUPPORT_FUNC("TRANSPOSE_CONV2D",
Sadik Armagan32ca1442020-11-13 17:51:56 +0000706 tfLiteContext,
707 IsTransposeConvolution2dSupported,
708 delegateData.m_Backends,
709 isSupported,
710 inputTensorInfo,
711 outputTensorInfo,
712 descriptor,
713 filterTensorInfo,
714 armnn::EmptyOptional());
715 return isSupported ? kTfLiteOk : kTfLiteError;
716 }
717
718 armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
719 filterTensor,
720 armnn::EmptyOptional());
721 ARMNN_ASSERT(layer != nullptr);
722
723 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
724 outputSlot.SetTensorInfo(outputTensorInfo);
725
726 // Connect
Keith Davis892fafe2020-11-26 17:40:35 +0000727 if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
Sadik Armagan32ca1442020-11-13 17:51:56 +0000728 {
Keith Davis892fafe2020-11-26 17:40:35 +0000729 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
730 Connect(layer->GetInputSlot(0));
Sadik Armagan32ca1442020-11-13 17:51:56 +0000731 }
732
733 // Prepare output slots
734 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
735 {
736 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
Keith Davis892fafe2020-11-26 17:40:35 +0000737 delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
738 &outputSlot;
Sadik Armagan32ca1442020-11-13 17:51:56 +0000739 }
740 return kTfLiteOk;
741}
742
Sadik Armagan62483be2020-10-23 17:14:43 +0100743TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
744 TfLiteContext* tfLiteContext,
745 TfLiteNode* tfLiteNode,
746 int nodeIndex,
747 int32_t operatorCode)
748{
Sadik Armagan32ca1442020-11-13 17:51:56 +0000749 switch(operatorCode)
750 {
751 case kTfLiteBuiltinConv2d:
752 return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
Matthew Sloyan81ec9942021-10-12 10:26:30 +0100753// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
754#if defined(ARMNN_POST_TFLITE_2_5)
755 case kTfLiteBuiltinConv3d:
756 return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
757#endif
Sadik Armagan32ca1442020-11-13 17:51:56 +0000758 case kTfLiteBuiltinDepthwiseConv2d:
759 return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
760 case kTfLiteBuiltinTransposeConv:
761 return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
762 default:
763 return kTfLiteError;
764 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100765}
766
767} // namespace armnnDelegate