//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>
#include <DelegateUtils.hpp>

#include <algorithm>
#include <cstring>
#include <iterator>
#include <vector>

namespace armnnOpaqueDelegate
{

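// The maximum tensor rank that the splitter handling below will accept.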
constexpr unsigned int MaxNumOfTensorDimensions = 5U;

TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
                                TfLiteOpaqueContext* tfLiteContext,
                                TfLiteOpaqueNode* tfLiteNode,
                                int nodeIndex,
                                int32_t tfLiteSplitOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));

    auto* splitParameters = reinterpret_cast<TfLiteSplitParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    int numSplits = NonNegative(splitParameters->num_splits, nodeIndex);

    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));

    // Gather the input indices and use them to get the axis tensor.
    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use the input indices to get the input tensor.
    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather the output indices and use them to get the output tensors.
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numSplits) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);

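    // The axis is supplied as a one-element tensor (input 0 of SPLIT); read out its single value.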
    ARMNN_ASSERT(GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() == 1);
    auto* axisTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
    int32_t axis = axisTensorData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        // The square bracket denotes an inclusive bound, the parenthesis an exclusive one.
        // E.g. a rank 4 tensor can have an axis in the range [-4, 4),
        // where -1 == 3, -2 == 2, -3 == 1, -4 == 0.
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Operation has invalid axis: #%d. Axis must be in range [-n, n) in node #%d:",
            axis, nodeIndex);
        return kTfLiteError;
    }
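    // Wrap a negative axis (e.g. -1 for the last dimension) into the positive range [0, rank).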
    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());

    std::vector<armnn::TensorInfo> outputs;
    for (int i = 0; i < numSplits; ++i)
    {
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
    }
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be "
            "greater than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
        return kTfLiteError;
    }

    std::vector<unsigned int> splitterDimSizes(inputDimSize);

    // Add current input shape to splitterDimSizes
    for (unsigned int i = 0; i < inputDimSize; ++i)
    {
        splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
    }

    if (splitterDimSizes[splitDim] % numSplits != 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Number of splits #%d must evenly divide the dimension #%d in node #%d: ",
            numSplits, splitterDimSizes[splitDim], nodeIndex);
        return kTfLiteError;
    }
    splitterDimSizes[splitDim] /= numSplits;

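    // Each of the numSplits views keeps the input shape, except along splitDim where it holds
    // an equal 1/numSplits share; view j therefore starts at offset j * (inputDim / numSplits).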
    armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
    for (int j = 0; j < numSplits; ++j)
    {
        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
        {
            splitDescriptor.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
        }
        splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
    }

    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        // Check if supported.
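        // A null m_Network means the delegate is only probing for support, so query the
        // backends for this splitter configuration without adding a layer to the network.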
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPLIT",
                                          tfLiteContext,
                                          IsSplitterSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfos,
                                          splitDescriptor);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Connect the input slots.
    if (Connect(layer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }
    return kTfLiteOk;
}

TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
                                 TfLiteOpaqueContext* tfLiteContext,
                                 TfLiteOpaqueNode* tfLiteNode,
                                 int nodeIndex,
                                 int32_t tfLiteSplitVOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));

    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteSplitsTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteSplitsTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[2]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitVOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteSplitsTensor);
    ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1);
    ARMNN_ASSERT(GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor).GetNumElements() == 1);

    auto* axisTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
    std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
    int32_t axis = axisTensorData[0];

    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Operation has invalid axis: #%d. Axis must be in range [-n, n) in node #%d:",
            axis, nodeIndex);
        return kTfLiteError;
    }
    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());

    auto* splitVParameters = reinterpret_cast<TfLiteSplitVParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
    int numSplits = 0;
    if (splitVParameters)
    {
        numSplits = NonNegative(splitVParameters->num_splits, nodeIndex);
    }
    else
    {
        numSplits = splitsTensorInfo.GetNumElements();
    }

    if (numSplits <= 0)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Invalid number of splits %d in node #%d",
            numSplits, nodeIndex);
        return kTfLiteError;
    }

    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));

    // Gather the output indices and use them to get the output tensors.
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numSplits) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }
    std::vector<armnn::TensorInfo> outputs;
    for (int i = 0; i < numSplits; ++i)
    {
        const TfLiteOpaqueTensor* tfLiteOutputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[i]);
        if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }
        outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
    }
    const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());

    auto inputDimSize = inputTensorInfo.GetNumDimensions();
    if (inputDimSize > MaxNumOfTensorDimensions)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be "
            "greater than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
        return kTfLiteError;
    }

    std::vector<int32_t> splitsTensorData(numSplits);
    std::memcpy(splitsTensorData.data(), TfLiteOpaqueTensorData(tfLiteSplitsTensor), splitsTensorInfo.GetNumBytes());

    unsigned int index = 0;
    unsigned int inferredIndex = 0;
    int numberOfInferred = 0;
    int splitSum = 0;

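    // TfLite allows a single entry of split_sizes to be -1, meaning its size should be inferred
    // from whatever remains. Count the negative entries and sum the explicitly given sizes.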
    for (auto splitData : splitsTensorData)
    {
        if (splitData < 0)
        {
            ++numberOfInferred;
            inferredIndex = index;
        }
        else
        {
            splitSum += splitData;
        }
        ++index;
    }

    // Check for an inferred split size.
    if (numberOfInferred == 0)
    {
        if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext, "TfLiteArmnnDelegate: SplitV split_sizes does not sum to the size of the "
                "value tensor along split_dim in node #%d", nodeIndex);
            return kTfLiteError;
        }
    }
    else if (numberOfInferred == 1)
    {
        splitsTensorData[inferredIndex] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
    }
    else
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: SplitV cannot infer split size for "
            "more than one split in node #%d",
            nodeIndex);
        return kTfLiteError;
    }

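    // Build one view per split: the full input shape, except along splitDim where each view
    // takes its own split size. accumSplit tracks the running origin offset along splitDim.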
    armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
    unsigned int accumSplit = 0;
    for (int j = 0; j < numSplits; ++j)
    {
        unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);

        // Set the size of the views.
        for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
        {
            unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
            if (dimIdx == splitDim)
            {
                dimSize = splitSize;
            }
            splitDescriptor.SetViewSize(j, dimIdx, dimSize);
        }

        splitDescriptor.SetViewOriginCoord(j, splitDim, accumSplit);
        accumSplit += splitSize;
    }

    armnn::BackendId setBackend;
    if (!delegateData.m_Network)
    {
        // Check if supported.
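        // As with SPLIT, a null m_Network means this is the delegate's support-check pass:
        // report whether a backend can execute this splitter without adding a layer.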
        bool isSupported = false;
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SPLITV",
                                          tfLiteContext,
                                          IsSplitterSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfos,
                                          splitDescriptor);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
    {
        layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
    }

    // Try to connect the constant inputs, if there are any.
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect.
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate