//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/internal/tensor.h>

namespace armnnDelegate
{

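// Converts a TfLite CONV_2D node into an armnn Convolution2dLayer. When the delegate
// is only probing for support (no network has been created yet), this asks the
// backends whether the convolution is supported instead of adding any layers.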
TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met: %d provided in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = tfLiteNode->inputs->size > 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

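    // The backend support query below expects a bias TensorInfo even when the node
    // carries no bias, hence the scalar placeholder constructed above.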
    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
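    // CalcPadding (DelegateUtils.hpp) turns TfLite's implicit SAME/VALID padding into
    // explicit values. Worked example, assuming the usual TfLite SAME scheme: input 224,
    // filter 3, stride 2, dilation 1 gives output ceil(224 / 2) = 112 and total
    // padding max(0, (112 - 1) * 2 + 3 - 224) = 1, split as 0 before and 1 after.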
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

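    // A null network means this call is only validating support (the delegate is
    // deciding which nodes it can take); return the backend verdict without
    // creating any layers.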
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = nullptr;

    // Set up filter and biases
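    // The filter is wrapped as an armnn ConstTensor; the empty PermutationVector
    // means the data is consumed in TfLite's own layout, with no reordering.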
    auto filter =
        CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
                          filterTensorInfo,
                          armnn::Optional<armnn::PermutationVector&>());

    if (biasEnabled)
    {
        auto biases =
            CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
                              biasTensorInfo,
                              armnn::Optional<armnn::PermutationVector&>());
        layer = delegateData.m_Network->AddConvolution2dLayer(descriptor,
                                                              filter,
                                                              armnn::Optional<armnn::ConstTensor>(biases));
    }
    else
    {
        layer = delegateData.m_Network->AddConvolution2dLayer(descriptor,
                                                              filter,
                                                              armnn::EmptyOptional());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check activation
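    // FusedActivation appends an activation layer behind the convolution output
    // when the TfLite node carries a fused activation (e.g. ReLU or ReLU6).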
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

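// Converts a TfLite DEPTHWISE_CONV_2D node. The flow mirrors VisitConv2dOperator;
// the differences are the descriptor type and the weights layout handling below.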
TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met: %d provided in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    bool biasEnabled = tfLiteNode->inputs->size > 2;

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);

    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorFlow Lite weights come in the format [1, H, W, I * M]
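    // (I = number of input channels, M = depth multiplier)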
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    // For depthwise, the weights layout matches TfLite's [1, H, W, I*M], so no permutation is required.
    auto filter = CreateConstTensor(&tfLiteFilterTensor, filterTensorInfo);

    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsDepthwiseConvolutionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filter.GetInfo(),
                                   armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = nullptr;

    if (biasEnabled)
    {
        auto biases =
            CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
                              biasTensorInfo);
        layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
                                                                       filter,
                                                                       armnn::Optional<armnn::ConstTensor>(biases));
    }
    else
    {
        layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
                                                                       filter,
                                                                       armnn::EmptyOptional());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);
    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);
    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check activation
    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

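// Converts a TfLite TRANSPOSE_CONV node. Note the TfLite operand order:
// inputs[0] is the requested output shape, inputs[1] the weights and
// inputs[2] the actual input tensor.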
TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::TransposeConvolution2dDescriptor descriptor;
    auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
    descriptor.m_BiasEnabled = false;
    descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output shape tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output shape tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

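    // The output-shape operand may be stored as Signed32 or QAsymmU8; either way it
    // is read element by element into a plain int32 vector.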
    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
    std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
    if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); i++)
        {
            outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
        }
    }

    if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); i++)
        {
            outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
    for (int dimension : outputShape)
    {
        descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
    }
    descriptor.m_OutputShapeEnabled = true;

    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight,
                filterHeight,
                descriptor.m_StrideY,
                1, // dilation y
                descriptor.m_PadTop,
                descriptor.m_PadBottom,
                parameters->padding);
    CalcPadding(inputWidth,
                filterWidth,
                descriptor.m_StrideX,
                1, // dilation x
                descriptor.m_PadLeft,
                descriptor.m_PadRight,
                parameters->padding);

    // Set up filter
    auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
                                          filterTensorInfo,
                                          armnn::Optional<armnn::PermutationVector&>());
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsTransposeConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::EmptyOptional());
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                             filterTensor,
                                                                                             armnn::EmptyOptional());
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
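    // The generic Connect() helper is not used here: only inputs[2] (the data input)
    // feeds the layer, since the output shape went into the descriptor and the
    // weights were consumed as a constant tensor above.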
    if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])] != nullptr)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->inputs->data[2])]->
            Connect(layer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(tfLiteNode->outputs->data[outputIndex])] =
            &outputSlot;
    }
    return kTfLiteOk;
}

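// Shared entry point: dispatches on the TfLite builtin operator code to the matching
// convolution visitor above.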
TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
    switch (operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinTransposeConv:
            return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnDelegate