//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <OpaqueDelegateUtils.hpp>
#include <SharedFunctions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnOpaqueDelegate
{

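// Each Visit*Operator function below is called in one of two phases. When delegateData.m_Network is null the call
// is a capability check: the tensor infos and descriptor are forwarded to the backends through
// FORWARD_LAYER_OPAQUE_SUPPORT_FUNC and nothing is built. Otherwise the layer (plus constant layers for any
// constant weights, bias or input) is added to the ArmNN network and its slots are connected.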
TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteOpaqueContext* tfLiteContext,
                                 TfLiteOpaqueNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not met; got %d in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use input indices to get the filter tensor.
    const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
                                                                        tfLiteContext,
                                                                        outputTensorInfo,
                                                                        outputTensorInfo,
                                                                        activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    armnn::TensorInfo biasTensorInfo;
    const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    if (biasEnabled)
    {
        // Use input indices to get the bias tensor.
        tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }
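    // When the bias operand is absent, the one-element placeholder TensorInfo above just keeps the
    // Optional passed to the backends well-formed; with m_BiasEnabled false it should not be read.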

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(tfLiteNodeParameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(tfLiteNodeParameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(tfLiteNodeParameters->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(tfLiteNodeParameters->dilation_height_factor, nodeIndex);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
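    // CalcPadding follows TfLite's SAME/VALID semantics: VALID adds no padding, while SAME pads so that
    // outputSize == ceil(inputSize / stride). E.g. inputWidth 224, filterWidth 3, stride 2 needs
    // max((112 - 1) * 2 + 3 - 224, 0) = 1 column in total, split as PadLeft 0 / PadRight 1 (surplus goes last).

    // A null m_Network signals the validation pass: query backend support and return without adding the layer.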
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONV2D",
                                          tfLiteContext,
                                          IsConvolution2dSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor,
                                          filterTensorInfo,
                                          optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Set up filter and biases
    auto layerName = GetName(armnn::LayerType::Convolution2d, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor, layerName.c_str());
    layer->SetBackendId(setBackend);

    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);

        auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);

            auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
                                                                                           biasName.c_str());
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that it is also allocated to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}

TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not met; got %d in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use input indices to get the filter tensor.
    const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters =
        reinterpret_cast<TfLiteDepthwiseConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (tfLiteNodeParameters)
    {
        activationType = tfLiteNodeParameters->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
                                                                        tfLiteContext,
                                                                        outputTensorInfo,
                                                                        outputTensorInfo,
                                                                        activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    armnn::TensorInfo biasTensorInfo;
    const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    if (biasEnabled)
    {
        // Use input indices to get the bias tensor.
        tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(tfLiteNodeParameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(tfLiteNodeParameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(tfLiteNodeParameters->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(tfLiteNodeParameters->dilation_height_factor, nodeIndex);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];
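    // (M is the depth multiplier: e.g. 16 input channels with a multiplier of 2 give a [1, H, W, 32] filter.)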

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);

    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                          tfLiteContext,
                                          IsDepthwiseConvolutionSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor,
                                          filterTensorInfo,
                                          armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::DepthwiseConvolution2d, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor,
                                                                                             layerName.c_str());
    layer->SetBackendId(setBackend);

    if (filterTensorInfo.IsConstant())
    {
        // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
        auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);

        auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);

            auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
                                                                                           biasName.c_str());
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that it is also allocated to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!tfLiteNodeParameters)
    {
        // No Activation
        return kTfLiteOk;
    }
    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}

TfLiteStatus VisitConv3dOperator(DelegateData& delegateData,
                                 TfLiteOpaqueContext* tfLiteContext,
                                 TfLiteOpaqueNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not met; got %d in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::Convolution3dDescriptor descriptor;
    auto* params = reinterpret_cast<TfLiteConv3DParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_DataLayout = armnn::DataLayout::NDHWC;
    descriptor.m_StrideX = NonNegative(params->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(params->stride_height, nodeIndex);
    descriptor.m_StrideZ = NonNegative(params->stride_depth, nodeIndex);
    descriptor.m_DilationX = NonNegative(params->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(params->dilation_height_factor, nodeIndex);
    descriptor.m_DilationZ = NonNegative(params->dilation_depth_factor, nodeIndex);

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use input indices to get the filter tensor.
    const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    TfLiteFusedActivation activationType = kTfLiteActNone;
    if (params)
    {
        activationType = params->activation;
        TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                        outputTensorInfo, activationType);
        if (activationStatus != kTfLiteOk)
        {
            return kTfLiteError;
        }
    }

    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);

    armnn::TensorInfo biasTensorInfo;
    const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;

    if (biasEnabled)
    {
        // Use input indices to get the bias tensor.
        tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
        if (!IsValid(tfLiteContext, tfLiteBiasTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
    }
    else
    {
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    // TfLite uses NDHWC tensors
    const unsigned int inputDepth = inputTensorInfo.GetShape()[1];
    const unsigned int inputHeight = inputTensorInfo.GetShape()[2];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[3];

    // Assuming the filter is DHWIO: Depth, Height, Width, InputChannels, OutputChannels
    const unsigned int filterDepth = filterTensorInfo.GetShape()[0];
    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputDepth, filterDepth, descriptor.m_StrideZ, descriptor.m_DilationZ,
                descriptor.m_PadFront, descriptor.m_PadBack, params->padding);
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, params->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, params->padding);

    // If m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
    // support for the operator. If supported, VisitConvolutionOperator will be called again to add the layer
    // to the network as seen below.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONV3D",
                                          tfLiteContext,
                                          IsConvolution3dSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor,
                                          filterTensorInfo,
                                          optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Convolution3d, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution3dLayer(descriptor, layerName.c_str());
    layer->SetBackendId(setBackend);
    ARMNN_ASSERT(layer != nullptr);

    // Add a constant layer for weights and biases if inputs are constant,
    // which are connected to the Convolution3d layer as inputs.
    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(tfLiteFilterTensor,
                                        filterTensorInfo);

        auto filterName = GetName(armnn::LayerType::Constant, nodeIndex, "Filter");
        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter, filterName.c_str());
        ARMNN_ASSERT(weightsLayer != nullptr);

        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);

            auto biasName = GetName(armnn::LayerType::Constant, nodeIndex, "Bias");
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor,
                                                                                           biasName.c_str());
            ARMNN_ASSERT(biasLayer != nullptr);

            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so we must check that it is also allocated to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (!params)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData, nodeIndex);
}

TfLiteStatus VisitTransposeConv2dOperator(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    armnn::TransposeConvolution2dDescriptor descriptor;
    auto* parameters = reinterpret_cast<TfLiteTransposeConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    descriptor.m_BiasEnabled = false;
    descriptor.m_StrideX = NonNegative(parameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(parameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
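    // Note: this path never sets up a bias (m_BiasEnabled stays false and an EmptyOptional is passed to
    // AddTransposeConvolution2dLayer below), and no fused activation is handled for TRANSPOSE_CONV.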

    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }

    // For TRANSPOSE_CONV, input 0 is the output shape, input 1 the weights and input 2 the data input.
    const TfLiteOpaqueTensor* tfLiteOutputShapeTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
                                                                                           inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputShapeTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteFilterTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d",
            nodeIndex);
        return kTfLiteError;
    }
    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // This block determines the output shape of the transpose convolution.
    // If the output shape tensor is a constant, we can access the data at load time and set the shape of the layer.
    // If it is not constant, we do not have access to the shape data, so we have to use infer output shape.
    if (IsConstantTensor(tfLiteOutputShapeTensor))
    {
        const armnn::TensorInfo outputShapeTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputShapeTensor);
        std::vector<int32_t> outputShape(outputShapeTensorInfo.GetNumElements());
        if (outputShapeTensorInfo.GetDataType() == armnn::DataType::Signed32)
        {
            for (unsigned int i = 0; i < outputShapeTensorInfo.GetNumElements(); ++i)
            {
                outputShape[i] = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteOutputShapeTensor))[i];
            }
        }

        if (outputShapeTensorInfo.GetDataType() == armnn::DataType::QAsymmU8)
        {
            for (unsigned int i = 0; i < outputShapeTensorInfo.GetNumElements(); ++i)
            {
                outputShape[i] = static_cast<uint8_t*>(TfLiteOpaqueTensorData(tfLiteOutputShapeTensor))[i];
            }
        }
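        // Any other data type leaves outputShape zero-initialised. TfLite defines the output shape tensor
        // as int32, so the QAsymmU8 branch above is presumably only defensive.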

        // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
        for (int dimension : outputShape)
        {
            descriptor.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
        }
        descriptor.m_OutputShapeEnabled = true;

        // TfLite uses NHWC tensors
        const unsigned int outputHeight = descriptor.m_OutputShape[1];
        const unsigned int outputWidth = descriptor.m_OutputShape[2];

        CalcPadding(inputHeight,
                    filterHeight,
                    descriptor.m_StrideY,
                    1, // DilationY
                    descriptor.m_PadTop,
                    descriptor.m_PadBottom,
                    parameters->padding,
                    outputHeight);

        CalcPadding(inputWidth,
                    filterWidth,
                    descriptor.m_StrideX,
                    1, // DilationX
                    descriptor.m_PadLeft,
                    descriptor.m_PadRight,
                    parameters->padding,
                    outputWidth);
    }
    else
    {
        CalcPadding(inputHeight,
                    filterHeight,
                    descriptor.m_StrideY,
                    1, // DilationY
                    descriptor.m_PadTop,
                    descriptor.m_PadBottom,
                    parameters->padding);

        CalcPadding(inputWidth,
                    filterWidth,
                    descriptor.m_StrideX,
                    1, // DilationX
                    descriptor.m_PadLeft,
                    descriptor.m_PadRight,
                    parameters->padding);
    }

    // Set up filter
    auto filterTensor = CreateConstTensor(tfLiteFilterTensor,
                                          filterTensorInfo);
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("TRANSPOSE_CONV2D",
                                          tfLiteContext,
                                          IsTransposeConvolution2dSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor,
                                          filterTensorInfo,
                                          armnn::EmptyOptional());
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::TransposeConvolution2d, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTransposeConvolution2dLayer(descriptor,
                                                                                             filterTensor,
                                                                                             armnn::EmptyOptional(),
                                                                                             layerName.c_str());
    layer->SetBackendId(setBackend);
    ARMNN_ASSERT(layer != nullptr);

    // The data input can be constant, so we must check that it is allocated to an input slot
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

        auto inputName = GetName(armnn::LayerType::Constant, nodeIndex, "Input");
        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input, inputName.c_str());
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
    if (delegateData.m_OutputSlotForNode[static_cast<unsigned int>(inputTensors[2])] != nullptr)
    {
        delegateData.m_OutputSlotForNode[static_cast<unsigned int>(inputTensors[2])]->
            Connect(layer->GetInputSlot(0));
    }

    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return kTfLiteOk;
}
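
// Entry point for the convolution family of builtins: dispatch on the operator code, returning
// kTfLiteError for any code without a handler.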
TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
    switch (operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinConv3d:
            return VisitConv3dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinTransposeConv:
            return VisitTransposeConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnOpaqueDelegate