//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <tensorflow/lite/kernels/internal/tensor.h>

namespace armnnDelegate
{

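// Validates a TfLite CONV_2D node and, when a network is being built, adds the equivalent
// ArmNN Convolution2d layer (constant weights plus an optional bias) to it. When called
// without a network, it only reports whether the configuration is supported.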
TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteContext* tfLiteContext,
                                 TfLiteNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met (got %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteConvParams*>(tfLiteNode->builtin_data);

    bool biasEnabled = tfLiteNode->inputs->size > 2;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
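        // No bias: create a 1-element placeholder TensorInfo. Since the descriptor has
        // m_BiasEnabled set to false, backends do not read these values.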
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);
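    // (With kTfLitePaddingSame the TfLite rule applies: e.g. input extent 224, filter 3,
    // stride 2 gives output ceil(224 / 2) = 112 and total padding (112 - 1) * 2 + 3 - 224 = 1,
    // split as 0 before / 1 after, the odd cell going to the bottom/right.)

    // With no network to build, the delegate is only being asked whether this node is
    // supported, so just query the backends.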
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = nullptr;

    // Set up filter and biases
    auto filter =
        CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[1]],
                          filterTensorInfo,
                          armnn::Optional<armnn::PermutationVector&>());

    if (biasEnabled)
    {
        auto biases =
            CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
                              biasTensorInfo,
                              armnn::Optional<armnn::PermutationVector&>());
        layer = delegateData.m_Network->AddConvolution2dLayer(descriptor,
                                                              filter,
                                                              armnn::Optional<armnn::ConstTensor>(biases));
    }
    else
    {
        layer = delegateData.m_Network->AddConvolution2dLayer(descriptor,
                                                              filter,
                                                              armnn::EmptyOptional());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    // Check for an optionally fused activation; params was obtained from builtin_data above.
    if (!params)
    {
        // No activation
        return kTfLiteOk;
    }
    TfLiteFusedActivation activationType = params->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

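// Validates a TfLite DEPTHWISE_CONV_2D node and, when building, adds the equivalent ArmNN
// DepthwiseConvolution2d layer. TfLite stores depthwise weights as [1, H, W, I * M] while
// ArmNN expects [M, I, H, W], so the weights are reshaped and permuted below.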
TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) not met (got %d) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    bool biasEnabled = tfLiteNode->inputs->size > 2;

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    const auto params = reinterpret_cast<TfLiteDepthwiseConvParams*>(tfLiteNode->builtin_data);

    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    // Mappings from TensorflowLite filter tensors to the ArmNN filter tensors (ArmNN weights have to be [M, I, H, W])
    armnn::PermutationVector permutationVector{ 2, 3, 1, 0 }; // [H, W, I, M] -> [M, I, H, W]
    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor, permutationVector);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Reshape weights as [ H, W, I, M ]
    filterTensorInfo.SetShape({ filterHeight,
                                filterWidth,
                                inputTensorInfo.GetShape()[3],
                                filterTensorInfo.GetShape()[3] / inputTensorInfo.GetShape()[3] });
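    // (e.g. I = 3 input channels with depth multiplier M = 2: the TfLite filter of shape
    // [1, H, W, 6] is reshaped to [H, W, 3, 2].)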

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    armnn::TensorInfo biasTensorInfo;
    if (biasEnabled)
    {
        const TfLiteTensor& tfLiteBiasTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
        if (!IsValid(&tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    std::vector<uint8_t> swizzledData(filterTensorInfo.GetNumBytes());
    auto filter =
        CreateConstTensor(&tfLiteFilterTensor,
                          filterTensorInfo,
                          armnn::Optional<armnn::PermutationVector&>(permutationVector),
                          swizzledData.data());

    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsDepthwiseConvolutionSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filter.GetInfo(),
                                   armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = nullptr;

    if (biasEnabled)
    {
        auto biases =
            CreateConstTensor(&tfLiteContext->tensors[tfLiteNode->inputs->data[2]],
                              biasTensorInfo,
                              armnn::Optional<armnn::PermutationVector&>());
        layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
                                                                       filter,
                                                                       armnn::Optional<armnn::ConstTensor>(biases));
    }
    else
    {
        layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
                                                                       filter,
                                                                       armnn::EmptyOptional());
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    Connect(layer, tfLiteNode, delegateData);

    // Check for an optionally fused activation.
    if (!params)
    {
        // No activation
        return kTfLiteOk;
    }
    TfLiteFusedActivation activationType = params->activation;
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

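// Validates a TfLite TRANSPOSE_CONV node and, when building, adds the equivalent ArmNN
// TransposeConvolution2d layer. Note the TfLite input ordering: input 0 is the requested
// output shape, input 1 the filter and input 2 the actual input data.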
TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
                                          TfLiteContext* tfLiteContext,
                                          TfLiteNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::TransposeConvolution2dDescriptor descriptor;
    auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(tfLiteNode->builtin_data);
    descriptor.m_BiasEnabled = false;
    descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteOutputShapeTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output shape tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputShapeTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output shape tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputShapeTensor);
    std::vector<int32_t> outputShape(tensorInfo.GetNumElements());
    if (tensorInfo.GetDataType() == armnn::DataType::Signed32)
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            outputShape[i] = ::tflite::GetTensorData<int32_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    else if (tensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
    {
        for (unsigned int i = 0; i < tensorInfo.GetNumElements(); ++i)
        {
            outputShape[i] = ::tflite::GetTensorData<uint8_t>(&tfLiteOutputShapeTensor)[i];
        }
    }
    // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
    for (int dimension : outputShape)
    {
        descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
    }
    descriptor.m_OutputShapeEnabled = true;

    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteFilterTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic filter tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
    armnn::TensorInfo filterTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteFilterTensor);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight,
                filterHeight,
                descriptor.m_StrideY,
                1, // dilation y
                descriptor.m_PadTop,
                descriptor.m_PadBottom,
                parameters->padding);
    CalcPadding(inputWidth,
                filterWidth,
                descriptor.m_StrideX,
                1, // dilation x
                descriptor.m_PadLeft,
                descriptor.m_PadRight,
                parameters->padding);

    // Set up filter
    auto filterTensor = CreateConstTensor(&tfLiteFilterTensor,
                                          filterTensorInfo,
                                          armnn::Optional<armnn::PermutationVector&>());
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsTransposeConvolution2dSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   descriptor,
                                   filterTensorInfo,
                                   armnn::EmptyOptional());
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                             filterTensor,
                                                                                             armnn::EmptyOptional());
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect the data input (input 2) manually: input 0 is the output shape tensor and
    // input 1 the constant filter, neither of which becomes a network connection.
    if (delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]] != nullptr)
    {
        delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[2]]->Connect(layer->GetInputSlot(0));
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& slot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[tfLiteNode->outputs->data[outputIndex]] = &slot;
    }
    return kTfLiteOk;
}

TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteContext* tfLiteContext,
                                      TfLiteNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
    switch (operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinTransposeConv:
            return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnDelegate