//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <OpaqueDelegateUtils.hpp>
#include <SharedFunctions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnOpaqueDelegate
{

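// Validates a TfLite CONV_2D operator and, when a network is being built
// (delegateData.m_Network is non-null), adds the equivalent Arm NN Convolution2d layer,
// constant weight/bias layers and any fused activation to that network.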
TfLiteStatus VisitConv2dOperator(DelegateData& delegateData,
                                 TfLiteOpaqueContext* tfLiteContext,
                                 TfLiteOpaqueNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not satisfied (%d provided) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Use input indices to get filter tensor.
    const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteFilterTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters = reinterpret_cast<TfLiteConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    if (!tfLiteNodeParameters)
    {
        // The builtin data is dereferenced below when filling in the descriptor,
        // so fail cleanly here rather than risk a null dereference later.
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to read builtin data in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
                                                                    tfLiteContext,
                                                                    outputTensorInfo,
                                                                    outputTensorInfo,
                                                                    activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    armnn::TensorInfo biasTensorInfo;
    const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;

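    // The bias is an optional third operand; TfLite marks an omitted optional input with a
    // negative tensor index, which is what IsOptionalOperandPresent checks for.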
    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    if (biasEnabled)
    {
        // Use input indices to get bias tensor.
        tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
        if (!IsValid(tfLiteBiasTensor))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
    }
    else
    {
        // No bias was supplied: create a one-element placeholder TensorInfo so that an
        // armnn::Optional can still be constructed for the backend support check.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::Optional<armnn::TensorInfo> optionalBiasInfo(biasTensorInfo);

    armnn::Convolution2dDescriptor descriptor;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(tfLiteNodeParameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(tfLiteNodeParameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(tfLiteNodeParameters->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(tfLiteNodeParameters->dilation_height_factor, nodeIndex);

    // TfLite uses NHWC tensors
    const unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    const unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    const unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    const unsigned int filterWidth = filterTensorInfo.GetShape()[2];

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);
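    // Worked example (assuming CalcPadding implements TfLite's SAME padding rule): with
    // SAME padding, inputHeight = 224, filterHeight = 3, stride 2 and dilation 1, the
    // output height is ceil(224 / 2) = 112, so the total padding needed is
    // (112 - 1) * 2 + 3 - 224 = 1. TfLite places the odd element at the end, giving
    // m_PadTop = 0 and m_PadBottom = 1.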

    // A null m_Network means this call is only checking whether the backends support the
    // layer (the validation pass), so query support and return without adding anything.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONV2D",
                                          tfLiteContext,
                                          IsConvolution2dSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor,
                                          filterTensorInfo,
                                          optionalBiasInfo);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Set up filter and biases
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddConvolution2dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    if (filterTensorInfo.IsConstant())
    {
        auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);
            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so check whether it needs to be supplied to
    // input slot 0 by a constant layer as well.
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check for and create a fused activation if one was requested.
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

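// Validates a TfLite DEPTHWISE_CONV_2D operator and, when a network is being built, adds
// the equivalent Arm NN DepthwiseConvolution2d layer, constant weight/bias layers and any
// fused activation to that network.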
TfLiteStatus VisitDepthwiseConv2dOperator(DelegateData& delegateData,
                                          TfLiteOpaqueContext* tfLiteContext,
                                          TfLiteOpaqueNode* tfLiteNode,
                                          int nodeIndex,
                                          int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not satisfied (%d provided) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Use input indices to get filter tensor.
    const TfLiteOpaqueTensor* tfLiteFilterTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteFilterTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Invalid filter tensor in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteFilterTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Dynamic filter tensors are not supported in node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& filterTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteFilterTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    auto* tfLiteNodeParameters =
            reinterpret_cast<TfLiteDepthwiseConvParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    if (!tfLiteNodeParameters)
    {
        // The builtin data is dereferenced below when filling in the descriptor,
        // so fail cleanly here rather than risk a null dereference later.
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to read builtin data in operator #%d node #%d: ",
            operatorCode, nodeIndex);
        return kTfLiteError;
    }

    TfLiteFusedActivation activationType = tfLiteNodeParameters->activation;
    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData,
                                                                    tfLiteContext,
                                                                    outputTensorInfo,
                                                                    outputTensorInfo,
                                                                    activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    armnn::TensorInfo biasTensorInfo;
    const TfLiteOpaqueTensor* tfLiteBiasTensor = nullptr;

    bool biasEnabled = IsOptionalOperandPresent(tfLiteNode, 2);
    if (biasEnabled)
    {
        // Use input indices to get bias tensor.
        tfLiteBiasTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
        if (!IsValid(tfLiteBiasTensor))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Invalid bias tensor in operator #%d node #%d: ",
                operatorCode, nodeIndex);
            return kTfLiteError;
        }
        if (IsDynamicTensor(tfLiteBiasTensor))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic bias tensors are not supported in node #%d: ",
                nodeIndex);
            return kTfLiteError;
        }
        biasTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteBiasTensor);
    }
    else
    {
        // No bias was supplied: use a one-element placeholder TensorInfo, as in VisitConv2dOperator.
        biasTensorInfo = armnn::TensorInfo(armnn::TensorShape({1}), GetDataType(tfLiteInputTensor));
    }

    armnn::DepthwiseConvolution2dDescriptor descriptor;
    descriptor.m_BiasEnabled = biasEnabled;
    descriptor.m_StrideX = NonNegative(tfLiteNodeParameters->stride_width, nodeIndex);
    descriptor.m_StrideY = NonNegative(tfLiteNodeParameters->stride_height, nodeIndex);
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_DilationX = NonNegative(tfLiteNodeParameters->dilation_width_factor, nodeIndex);
    descriptor.m_DilationY = NonNegative(tfLiteNodeParameters->dilation_height_factor, nodeIndex);

    // Assuming input is NHWC
    unsigned int inputHeight = inputTensorInfo.GetShape()[1];
    unsigned int inputWidth = inputTensorInfo.GetShape()[2];

    // TensorflowLite weights come in the format [1, H, W, I * M]
    unsigned int filterHeight = filterTensorInfo.GetShape()[1];
    unsigned int filterWidth = filterTensorInfo.GetShape()[2];
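    // For example, with inputChannels I = 3 and a depth multiplier M = 2, the TfLite weights
    // tensor has shape [1, H, W, 6]. Arm NN consumes the same [1, H, W, I * M] layout here,
    // which is why no permutation is applied to the constant filter below.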

    // Calculate padding
    CalcPadding(inputHeight, filterHeight, descriptor.m_StrideY, descriptor.m_DilationY,
                descriptor.m_PadTop, descriptor.m_PadBottom, tfLiteNodeParameters->padding);
    CalcPadding(inputWidth, filterWidth, descriptor.m_StrideX, descriptor.m_DilationX,
                descriptor.m_PadLeft, descriptor.m_PadRight, tfLiteNodeParameters->padding);

    // As above, a null m_Network means this call is only checking backend support.
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEPTHWISE_CONV2D",
                                          tfLiteContext,
                                          IsDepthwiseConvolutionSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          descriptor,
                                          filterTensorInfo,
                                          armnn::Optional<armnn::TensorInfo>(biasTensorInfo));
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddDepthwiseConvolution2dLayer(descriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    if (filterTensorInfo.IsConstant())
    {
        // For depthwise the weights layout is the same as for tflite [1, H, W, I*M]. No permutation required.
        auto filter = CreateConstTensor(tfLiteFilterTensor, filterTensorInfo);

        armnn::IConnectableLayer* weightsLayer = delegateData.m_Network->AddConstantLayer(filter);
        weightsLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1u));
        weightsLayer->GetOutputSlot(0).SetTensorInfo(filterTensorInfo);
    }

    if (biasEnabled)
    {
        if (biasTensorInfo.IsConstant())
        {
            auto biasTensor = CreateConstTensor(tfLiteBiasTensor, biasTensorInfo);

            armnn::IConnectableLayer* biasLayer = delegateData.m_Network->AddConstantLayer(biasTensor);
            ARMNN_ASSERT(biasLayer != nullptr);
            biasLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(2u));
            biasLayer->GetOutputSlot(0).SetTensorInfo(biasTensorInfo);
        }
    }

    // The data input can also be constant, so check whether it needs to be supplied to
    // input slot 0 by a constant layer as well.
    if (inputTensorInfo.IsConstant())
    {
        auto input = CreateConstTensor(tfLiteInputTensor, inputTensorInfo);

        armnn::IConnectableLayer* inputLayer = delegateData.m_Network->AddConstantLayer(input);
        inputLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0u));
        inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
    }

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check for and create a fused activation if one was requested.
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, layer, 0, delegateData);
}

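// Dispatches a convolution-family node to the matching visitor based on its builtin
// operator code.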
TfLiteStatus VisitConvolutionOperator(DelegateData& delegateData,
                                      TfLiteOpaqueContext* tfLiteContext,
                                      TfLiteOpaqueNode* tfLiteNode,
                                      int nodeIndex,
                                      int32_t operatorCode)
{
    switch (operatorCode)
    {
        case kTfLiteBuiltinConv2d:
            return VisitConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinDepthwiseConv2d:
            return VisitDepthwiseConv2dOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnOpaqueDelegate