//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

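// This file contains the opaque-delegate visitors for the CAST, RESHAPE, SQUEEZE and
// EXPAND_DIMS operators. CAST maps to an Arm NN Cast layer; the other three only redefine
// the tensor's shape, so they all map to an Arm NN Reshape layer.
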
TfLiteStatus VisitCastOperator(DelegateData& delegateData,
                               TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               int nodeIndex,
                               int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // This operator has a single input, so inputTensors[numInputs-1] is its only entry
    const TfLiteOpaqueTensor* tfLiteInputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[numInputs-1]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // This operator has a single output, so outputTensors[numOutputs-1] is its only entry
    const TfLiteOpaqueTensor* tfLiteOutputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[numOutputs-1]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

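    // validateFunc asks the backends in delegateData.m_Backends whether they support a Cast
    // layer with these tensor infos; the chosen backend is written into setBackend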
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CAST",
                                          tfLiteContext,
                                          IsCastSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outInfo);
    };

    // If m_Network is a nullptr, TfLite is still querying operator support via a prerequisite
    // callback. If the operator is supported, VisitCastOperator will be called again to add the
    // layer to the network, as seen further below.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Add a Cast layer
    auto layerName = GetName(armnn::LayerType::Cast, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer(layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs, if there are any
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                  TfLiteOpaqueContext* tfLiteContext,
                                  TfLiteOpaqueNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);

    if (numInputs == 2)
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    }
    else
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensors.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    armnn::ReshapeDescriptor reshapeDesc;
    std::vector<int32_t> targetShape;

    auto* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    // The new shape can be defined by either a second input tensor or by a builtin option, so we check for both.
    // Options might be set without valid data, so we also check that the dimension count is in a valid range.
    if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
    {
        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
        {
            targetShape.push_back(reshapeOptions->shape[i]);
        }
    }
    else if (numInputs == 2)
    {
        // Get the shape from the second input tensor
        const TfLiteOpaqueTensor* tfLiteShapeInputTensor =
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
        if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        int32_t numDims = TfLiteOpaqueTensorNumDims(tfLiteShapeInputTensor);
        if (numDims != 1)
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                    tfLiteContext,
                    "TfLiteArmnnOpaqueDelegate: Target 'shape' input is not a 1D tensor in "
                    "operator #%d node #%d: Falling back to TfLiteOptions.",
                    operatorCode, nodeIndex);
        }
        else
        {
            // Get the shape data out of the input tensor
            auto* shapeTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeInputTensor));
            int32_t shapeTensorNumValues = TfLiteOpaqueTensorDim(tfLiteShapeInputTensor, 0);
            for (int32_t i = 0; i < shapeTensorNumValues; ++i)
            {
                targetShape.push_back(shapeTensorDataPtr[i]);
            }
        }
    }
    else
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Target shape not defined in reshape parameters or input tensor. "
                "At least one method required in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

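    // Example: for a 12-element input, a target of {3, 4} (from either source) keeps all
    // 12 elements; a -1 component (e.g. {-1, 4}) is resolved to 3 by CreateOutputTensorShape below.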
    // Reject a target shape containing a zero unless the input itself has no elements.
    if (std::find(targetShape.begin(), targetShape.end(), 0) != targetShape.end() &&
        inputTensorInfo0.GetNumElements() != 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Input to reshape is a tensor with elements, "
                "but the requested shape has 0. "
                "operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Use the data to create the required tensor shape.
    if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: At most one component of shape can be -1 in: "
                "operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

    if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Reshape, number of elements in output shape does not match input "
                "operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
                                          tfLiteContext,
                                          IsReshapeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo0,
                                          outInfo,
                                          reshapeDesc);
    };

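    // No network yet: this is the validation phase, so just report operator support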
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs, if there are any
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
                                  TfLiteOpaqueContext* tfLiteContext,
                                  TfLiteOpaqueNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensors.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    auto* options = reinterpret_cast<TfLiteSqueezeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

    std::vector<uint32_t> squeezeDim;
    // A single negative dim index is interpreted as in Python: the effective index is the
    // number of input dimensions plus the negative index value
    if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
    {
        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
        squeezeDim.push_back(static_cast<uint32_t>(dim));
    }
    else
    {
        for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
        {
            squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
        }
    }
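    // Example: for an input of shape [1, 3, 1, 4], squeeze_dims = {-2} maps to dim 2 above,
    // and squeezing that size-1 dimension gives an output shape of [1, 3, 4]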

    armnn::TensorInfo outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SQUEEZE",
                                          tfLiteContext,
                                          IsReshapeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outInfo,
                                          reshapeDesc);
    };

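    // No network yet: this is the validation phase, so just report operator support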
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex, "Squeeze");
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs, if there are any
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
                                     TfLiteOpaqueContext* tfLiteContext,
                                     TfLiteOpaqueNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensors.
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensors.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor);

    auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    int32_t axis = axisTensorData[0];

    int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
    if (axis > inputDimSize || axis < -(inputDimSize + 1))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Axis must be in range "
                "[-(inputDimSize + 1), inputDimSize] inclusive.");
        return kTfLiteError;
    }

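    // A negative axis counts back from the end: for a rank-3 input, axis -1 normalises
    // to 3, i.e. the new dimension is appended at the back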
    if (axis < 0)
    {
        axis = inputDimSize + axis + 1;
    }

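    // Build the output shape by inserting a 1 at the normalised axis position,
    // e.g. an input of shape [2, 3] with axis 1 becomes [2, 1, 3]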
    std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
    unsigned int inputShapeIndex = 0;
    for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
    {
        if (i == static_cast<unsigned int>(axis))
        {
            shape[i] = 1;
        }
        else
        {
            shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
            ++inputShapeIndex;
        }
    }

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = armnn::TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("EXPAND_DIMS",
                                          tfLiteContext,
                                          IsReshapeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outInfo,
                                          reshapeDesc);
    };

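    // No network yet: this is the validation phase, so just report operator support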
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Reshape, nodeIndex, "ExpandDims");
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs, if there are any
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate