//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>
#include <DelegateUtils.hpp>

#include <algorithm>
#include <cstring>
#include <iterator>
#include <vector>

namespace armnnOpaqueDelegate
{

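// Arm NN's Splitter workload supports tensors of up to 5 dimensions; anything larger is rejected up front.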
constexpr unsigned int MaxNumOfTensorDimensions = 5U;

TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
                                TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                int nodeIndex,
                                int32_t tfLiteSplitOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));

    auto* splitParameters = reinterpret_cast<TfLiteSplitParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    int numSplits = NonNegative(splitParameters->num_splits, nodeIndex);

    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));

    // Gather the input indices and use them to get the axis tensor.
    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use the input indices to get the input tensor.
    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather the output indices and use them to get the output tensors.
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numSplits) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

    // The axis tensor must hold exactly one value.
    if (GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() != 1)
    {
        return kTfLiteError;
    }

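    // For SPLIT the axis tensor is input 0 and the data tensor input 1; read the single axis value out.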
    auto* axisTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
    int32_t axis = axisTensorData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square brackets denote an inclusive bound and parentheses an exclusive bound.
        // E.g. a rank 4 tensor can have an axis in the range [-4, 3):
        //   -1 == 3, -2 == 2, -3 == 1, -4 == 0
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Operation has invalid axis: #%d. "
                "Axis must be in range [-n, n) in node #%d:",
                axis, nodeIndex);
        return kTfLiteError;
    }
    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());

    std::vector<armnn::TensorInfo> outputs;
    for (int i = 0; i < numSplits; ++i)
    {
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
    }
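    // The backend support check takes the output infos as reference wrappers, so wrap the gathered infos once here.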
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: The number of dimensions #%d for the input tensors of the split op "
                "cannot be greater than #%d in node #%d: ",
                inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
        return kTfLiteError;
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add the current input shape to splitterDimSizes.
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Number of splits #%d must evenly divide the dimension #%d in node #%d: ",
                numSplits, splitterDimSizes[splitDim], nodeIndex);
        return kTfLiteError;
    }
    splitterDimSizes[splitDim] /= numSplits;

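    // Example: an input of shape [2, 6, 3] split three ways along axis 1 yields views of shape
    // [2, 2, 3] with origins 0, 2 and 4 along that axis, which is what the loop below encodes.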
    armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
    for (int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDescriptor.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

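    // With no network to build, this call is only validating support: query the backends and return.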
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        // Check if supported.
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPLIT",
                                          tfLiteContext,
                                          IsSplitterSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfos,
                                          splitDescriptor);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Connect the input slots.
    delegateData.m_OutputSlotForNode[inputTensors[1]]->Connect(layer->GetInputSlot(0));

    if (numSplits != static_cast<int>(layer->GetNumOutputSlots()))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Expected number of splits #%d does not "
                "match the number of output slots #%d in node #%d: ",
                numSplits, layer->GetNumOutputSlots(), nodeIndex);
        return kTfLiteError;
    }

    // Prepare the output slots.
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[
                static_cast<unsigned long>(outputTensors[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

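// SPLIT_V takes three inputs: the value tensor, a 1-D tensor of split sizes and a scalar axis.
// Unlike SPLIT, the slices may be unequal, and a single negative split size is inferred from the remainder.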
TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
                                 TfLiteOpaqueContext* tfLiteContext,
                                 TfLiteOpaqueNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t tfLiteSplitVOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));

    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteSplitsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteSplitsTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteSplitsTensor);

    // The splits tensor must be 1-D and the axis tensor must hold exactly one value.
    if (splitsTensorInfo.GetNumDimensions() != 1)
    {
        return kTfLiteError;
    }

    if (GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() != 1)
    {
        return kTfLiteError;
    }

    auto* axisTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
    int32_t axis = axisTensorData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Operation has invalid axis: #%d. "
                "Axis must be in range [-n, n) in node #%d:",
                axis, nodeIndex);
        return kTfLiteError;
    }
    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());

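    // num_splits normally comes from the builtin params; if they are absent, fall back to the
    // number of elements in the splits tensor.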
    auto* splitVParameters = reinterpret_cast<TfLiteSplitVParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    int numSplits = 0;
    if (splitVParameters)
    {
        numSplits = NonNegative(splitVParameters->num_splits, nodeIndex);
    }
    else
    {
        numSplits = armnn::numeric_cast<int>(splitsTensorInfo.GetNumElements());
    }

    if (numSplits <= 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Invalid number of splits %d in node #%d",
                numSplits, nodeIndex);
        return kTfLiteError;
    }

    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));

    // Gather the output indices and use them to get the output tensors.
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numSplits) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }
    std::vector<armnn::TensorInfo> outputs;
    for (int i = 0; i < numSplits; ++i)
    {
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
    }
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: The number of dimensions #%d for the input tensors of the split op "
                "cannot be greater than #%d in node #%d: ",
                inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
        return kTfLiteError;
    }

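    // Copy the raw split sizes out of the splits tensor; a negative entry marks a size to be inferred.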
    std::vector<int32_t> splitsTensorData(numSplits);
    std::memcpy(splitsTensorData.data(), TfLiteOpaqueTensorData(tfLiteSplitsTensor), splitsTensorInfo.GetNumBytes());

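    // Walk the split sizes once, summing the explicit sizes and remembering where an inferred entry sits.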
    unsigned int index = 0;
    unsigned int inferredIndex = 0;
    int numberOfInferred = 0;
    int splitSum = 0;

    for (auto splitData : splitsTensorData)
    {
        if (splitData < 0)
        {
            ++numberOfInferred;
            inferredIndex = index;
        }
        else
        {
            splitSum += splitData;
        }
        ++index;
    }

    // Handle an inferred split size (a negative entry in split_sizes).
    if (numberOfInferred == 0)
    {
        if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                    tfLiteContext,
                    "TfLiteArmnnOpaqueDelegate: SplitV split_sizes does not sum to the dimension "
                    "of value along split_dim in node #%d",
                    nodeIndex);
            return kTfLiteError;
        }
    }
    else if (numberOfInferred == 1)
    {
        splitsTensorData[inferredIndex] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
    }
    else
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: SplitV cannot infer split size for "
                "more than one split in node #%d",
                nodeIndex);
        return kTfLiteError;
    }

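    // Example: with split sizes [3, -1, 2] on a dimension of size 10, the inferred slice becomes
    // 10 - (3 + 2) = 5, so the views below get sizes 3, 5 and 2 with origins 0, 3 and 8.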
    armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
    unsigned int accumSplit = 0;
    for (int j = 0; j < numSplits; ++j)
    {
        unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);

        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
        {
            unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
            if (dimIdx == splitDim)
            {
                dimSize = splitSize;
            }
            splitDescriptor.SetViewSize(j, dimIdx, dimSize);
        }

        splitDescriptor.SetViewOriginCoord(j, splitDim, accumSplit);
        accumSplit += splitSize;
    }

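    // As in VisitSplitOperator, a null network means this is the validation pass: report backend support only.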
    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        // Check if supported.
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPLITV",
                                          tfLiteContext,
                                          IsSplitterSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfos,
                                          splitDescriptor);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Try to connect the constant inputs if there are any.
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect.
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate