//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>
#include <DelegateUtils.hpp>

#include <algorithm>
#include <cstring>
#include <iterator>
#include <vector>

namespace armnnOpaqueDelegate
{

constexpr unsigned int MaxNumOfTensorDimensions = 5U;

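// Parses a TFLite SPLIT operator (inputs: axis tensor, then value tensor) and either
// checks backend support or adds an armnn Splitter layer to the network being built.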
TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
                                TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                int nodeIndex,
                                int32_t tfLiteSplitOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));

    auto* splitParameters = reinterpret_cast<TfLiteSplitParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    int numSplits = NonNegative(splitParameters->num_splits, nodeIndex);

    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));

    // Gather input indices and use to get Axis tensor.
    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use input indices to get input tensor.
    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use to get output tensors.
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numSplits) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

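    // The axis input is a scalar tensor holding a single int32 value.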
    ARMNN_ASSERT(GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() == 1);
    auto* axisTensorDataPtr = static_cast<uint32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
    int32_t axis = axisTensorData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
        // E.g. Rank 4 tensor can have axis in range [-4, 3)
        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: Operation has invalid axis: #%d. "
                "Axis must be in range [-n, n) in node #%d:",
                axis, nodeIndex);
        return kTfLiteError;
    }
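    // ComputeWrappedIndex maps a negative axis onto the equivalent non-negative dimension index.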
    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());

    std::vector<armnn::TensorInfo> outputs;
    for (int i = 0; i < numSplits; ++i)
    {
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
    }
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be "
                "greater than #%d in node #%d: ",
                inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
        return kTfLiteError;
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: Number of splits #%d must evenly divide the dimension #%d in node #%d: ",
                numSplits, splitterDimSizes[splitDim], nodeIndex);
        return kTfLiteError;
    }
    splitterDimSizes[splitDim] /= numSplits;

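    // Each view takes the full input shape except along splitDim, where it holds an
    // equal 1/numSplits share; view origins are spaced that share apart along splitDim.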
    armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
    for (int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDescriptor.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        // Check if supported
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPLIT",
                                          tfLiteContext,
                                          IsSplitterSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfos,
                                          splitDescriptor);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Connect the input slots
    delegateData.m_OutputSlotForNode[inputTensors[1]]->Connect(layer->GetInputSlot(0));

    if (numSplits != static_cast<int>(layer->GetNumOutputSlots()))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: Expected number of splits #%d does not "
                "match the number of output slots #%d in node #%d: ",
                numSplits, layer->GetNumOutputSlots(), nodeIndex);
        return kTfLiteError;
    }

    // Prepare output slots
    for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
    {
        armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
        delegateData.m_OutputSlotForNode[
                static_cast<unsigned long>(outputTensors[outputIndex])] = &outputSlot;
    }
    return kTfLiteOk;
}

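// Parses a TFLite SPLIT_V operator (inputs: value tensor, split-sizes tensor, axis tensor),
// resolving at most one inferred (negative) split size before checking support or adding the layer.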
TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
                                 TfLiteOpaqueContext* tfLiteContext,
                                 TfLiteOpaqueNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t tfLiteSplitVOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));

    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteSplitsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteSplitsTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteSplitsTensor);
    ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1);
    ARMNN_ASSERT(GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() == 1);

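    // The axis input is a scalar tensor holding a single int32 value.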
    auto* axisTensorDataPtr = static_cast<uint32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
    int32_t axis = axisTensorData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: Operation has invalid axis: #%d. "
                "Axis must be in range [-n, n) in node #%d:",
                axis, nodeIndex);
        return kTfLiteError;
    }
    const unsigned int splitDim = ComputeWrappedIndex(axisTensorData[0], inputTensorInfo.GetNumDimensions());

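    // Prefer num_splits from the builtin parameters; otherwise fall back to the
    // number of elements in the splits tensor.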
    auto* splitVParameters = reinterpret_cast<TfLiteSplitVParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    int numSplits = 0;
    if (splitVParameters)
    {
        numSplits = NonNegative(splitVParameters->num_splits, nodeIndex);
    }
    else
    {
        numSplits = splitsTensorInfo.GetNumElements();
    }

    if (numSplits <= 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: Invalid number of splits %d in node #%d",
                numSplits, nodeIndex);
        return kTfLiteError;
    }

    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));

    // Gather output indices and use to get output tensors.
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numSplits) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }
    std::vector<armnn::TensorInfo> outputs;
    for (int i = 0; i < numSplits; ++i)
    {
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
    }
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be "
                "greater than #%d in node #%d: ",
                inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
        return kTfLiteError;
    }

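    // Copy the requested split sizes out of the splits tensor.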
    std::vector<int32_t> splitsTensorData(numSplits);
    std::memcpy(splitsTensorData.data(), TfLiteOpaqueTensorData(tfLiteSplitsTensor), splitsTensorInfo.GetNumBytes());

    unsigned int index = 0;
    unsigned int inferredIndex = 0;
    int numberOfInferred = 0;
    int splitSum = 0;

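    // A negative entry marks a split size to be inferred; record its index and
    // sum the explicit sizes so the remainder can be computed.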
    for (auto splitData : splitsTensorData)
    {
        if (splitData < 0)
        {
            ++numberOfInferred;
            inferredIndex = index;
        }
        else
        {
            splitSum += splitData;
        }
        ++index;
    }

    // Check the inferred split sizes: at most one entry may be inferred.
    if (numberOfInferred == 0)
    {
        if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                    tfLiteContext,
                    "TfLiteOpaqueArmnnDelegate: SplitV split_sizes does not sum to the dimension "
                    "of value along split_dim in node #%d",
                    nodeIndex);
            return kTfLiteError;
        }
    }
    else if (numberOfInferred == 1)
    {
        splitsTensorData[inferredIndex] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
    }
    else
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteOpaqueArmnnDelegate: SplitV cannot infer split size for "
                "more than one split in node #%d",
                nodeIndex);
        return kTfLiteError;
    }

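    // Build one view per split: each view matches the input shape except along
    // splitDim, where it takes its own split size; view origins accumulate along splitDim.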
    armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
    unsigned int accumSplit = 0;
    for (int j = 0; j < numSplits; ++j)
    {
        unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);

        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
        {
            unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
            if (dimIdx == splitDim)
            {
                dimSize = splitSize;
            }
            splitDescriptor.SetViewSize(j, dimIdx, dimSize);
        }

        splitDescriptor.SetViewOriginCoord(j, splitDim, accumSplit);
        accumSplit += splitSize;
    }

    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        // Check if supported
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPLITV",
                                          tfLiteContext,
                                          IsSplitterSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfos,
                                          splitDescriptor);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor, layerName.c_str());
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Try to connect the constant inputs if there are any.
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate