//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
6#pragma once
7
8#include "DelegateUtils.hpp"
9
10#include <algorithm>
11#include <iterator>
12#include <string>
13#include <vector>
14
15namespace armnnDelegate
16{
17
18constexpr unsigned int MaxNumOfTensorDimensions = 5U;
19
20TfLiteStatus VisitSplitOperator(DelegateData& delegateData,
21 TfLiteContext* tfLiteContext,
22 TfLiteNode* tfLiteNode,
23 int nodeIndex,
24 int32_t tfLiteSplitOperatorCode)
25{
26 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
27
28 auto* splitParameters = reinterpret_cast<TfLiteSplitParams*>(tfLiteNode->builtin_data);
29 const unsigned int numSplits = NonNegative(splitParameters->num_splits, nodeIndex);
30
31 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));
32
33 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
34 const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
35 if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitOperatorCode, nodeIndex))
36 {
37 return kTfLiteError;
38 }
39
40 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
41 if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitOperatorCode, nodeIndex))
42 {
43 return kTfLiteError;
44 }
45
46 const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
47 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
48
49 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
50 auto* axisTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
51 std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
52 const unsigned int splitDim = axisTensorData[0];
53
54 std::vector<armnn::TensorInfo> outputs;
55 for (unsigned int i = 0; i < numSplits; ++i)
56 {
57 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
58 if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitOperatorCode, nodeIndex))
59 {
60 return kTfLiteError;
61 }
62 outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor));
63 }
64 const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
65
66 auto inputDimSize = inputTensorInfo.GetNumDimensions();
67 if (inputDimSize > MaxNumOfTensorDimensions)
68 {
69 TF_LITE_MAYBE_KERNEL_LOG(
70 tfLiteContext,
71 "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be greater "
72 "than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
73 return kTfLiteError;
74 }
75
76 std::vector<unsigned int> splitterDimSizes(inputDimSize);
77
78 // Add current input shape to splitterDimSizes
79 for (unsigned int i = 0; i < inputDimSize; ++i)
80 {
81 splitterDimSizes[i] = inputTensorInfo.GetShape()[i];
82 }
83
84 if (splitterDimSizes[splitDim] % numSplits != 0)
85 {
86 TF_LITE_MAYBE_KERNEL_LOG(
87 tfLiteContext,
88 "TfLiteArmnnDelegate: Number of splits #%d must evenly divide the dimension #%d in node #%d: ",
89 numSplits, splitterDimSizes[splitDim], nodeIndex);
90 return kTfLiteError;
91 }
92 splitterDimSizes[splitDim] /= numSplits;
93
94 armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
95 for (unsigned int j = 0; j < numSplits; ++j)
96 {
97 // Set the size of the views.
98 for (unsigned int dimIdx = 0; dimIdx < splitterDimSizes.size(); ++dimIdx)
99 {
100 splitDescriptor.SetViewSize(j, dimIdx, splitterDimSizes[dimIdx]);
101 }
102 splitDescriptor.SetViewOriginCoord(j, splitDim, splitterDimSizes[splitDim] * j);
103 }
104
105 if (!delegateData.m_Network)
106 {
107 // Check if supported
108 bool isSupported = false;
109 FORWARD_LAYER_SUPPORT_FUNC(__func__,
110 tfLiteContext,
111 IsSplitterSupported,
112 delegateData.m_Backends,
113 isSupported,
114 inputTensorInfo,
115 outputTensorInfos,
116 splitDescriptor);
117 return isSupported ? kTfLiteOk : kTfLiteError;
118 }
119
120 armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
121 ARMNN_ASSERT(layer != nullptr);
122
123 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
124 {
125 layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
126 }
127
128 // Connect the input slots
129 delegateData.m_OutputSlotForNode[tfLiteNode->inputs->data[1]]->Connect(layer->GetInputSlot(0));
130
131 // Prepare output slots
132 for (unsigned int outputIndex = 0; outputIndex < layer->GetNumOutputSlots(); ++outputIndex)
133 {
134 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(outputIndex);
135 delegateData.m_OutputSlotForNode[
136 static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
137 }
138
139 return kTfLiteOk;
140}
141
142TfLiteStatus VisitSplitVOperator(DelegateData& delegateData,
143 TfLiteContext* tfLiteContext,
144 TfLiteNode* tfLiteNode,
145 int nodeIndex,
146 int32_t tfLiteSplitVOperatorCode)
147{
148 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
149
150 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
151 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
152 if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteSplitVOperatorCode, nodeIndex))
153 {
154 return kTfLiteError;
155 }
156
157 const TfLiteTensor& tfLiteSplitsTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
158 if (!IsValid(tfLiteContext, tfLiteSplitsTensor, tfLiteSplitVOperatorCode, nodeIndex))
159 {
160 return kTfLiteError;
161 }
162
163 const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[2]];
164 if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteSplitVOperatorCode, nodeIndex))
165 {
166 return kTfLiteError;
167 }
168
169 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
170 const armnn::TensorInfo& splitsTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteSplitsTensor);
171 ARMNN_ASSERT(splitsTensorInfo.GetNumDimensions() == 1);
172
173 const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
174 ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
175 auto* axisTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
176 std::vector<int32_t> axisTensorData(axisTensorDataPtr, axisTensorDataPtr + 1);
177
178 auto ComputeWrappedIndex = [](int index, unsigned int numDimensions)
179 {
180 int numDims = armnn::numeric_cast<int>(numDimensions);
181 int wrappedIndex = index < 0 ? numDims + index : index;
182 ARMNN_ASSERT(wrappedIndex >= 0);
183 ARMNN_ASSERT(wrappedIndex < numDims);
184
185 return static_cast<unsigned int>(wrappedIndex);
186 };
187
188 const unsigned int splitDim = ComputeWrappedIndex(axisTensorData[0],
189 inputTensorInfo.GetNumDimensions());
190
191 auto* splitVParameters = reinterpret_cast<TfLiteSplitVParams*>(tfLiteNode->builtin_data);
192 unsigned int numSplits = 0;
193 if (splitVParameters)
194 {
195 numSplits = NonNegative(splitVParameters->num_splits, nodeIndex);
196 }
197 else
198 {
199 numSplits = splitsTensorInfo.GetNumElements();
200 }
201
202 if (numSplits <= 0)
203 {
204 TF_LITE_MAYBE_KERNEL_LOG(
205 tfLiteContext, "TfLiteArmnnDelegate: Invalid number of splits %d in node #%d",
206 numSplits, nodeIndex);
207 return kTfLiteError;
208 }
209
210 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, numSplits, nodeIndex));
211 std::vector<armnn::TensorInfo> outputs;
212 for (unsigned int i = 0; i < numSplits; ++i)
213 {
214 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[i]];
215 if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSplitVOperatorCode, nodeIndex))
216 {
217 return kTfLiteError;
218 }
219 outputs.push_back(GetTensorInfoForTfLiteTensor(tfLiteOutputTensor));
220 }
221 const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
222
223 auto inputDimSize = inputTensorInfo.GetNumDimensions();
224 if (inputDimSize > MaxNumOfTensorDimensions)
225 {
226 TF_LITE_MAYBE_KERNEL_LOG(
227 tfLiteContext,
228 "TfLiteArmnnDelegate: The number of dimensions: #%d for input tensors of the split op cannot be greater "
229 "than #%d in node #%d: ", inputDimSize, MaxNumOfTensorDimensions, nodeIndex);
230 return kTfLiteError;
231 }
232
233 std::vector<int32_t> splitsTensorData(numSplits);
234#ifdef __STDC_LIB_EXT1__
235 ::memcpy_s(splitsTensorData.data(), sizeof(splitsTensorData),
236 tfLiteSplitsTensor.data.data, splitsTensorInfo.GetNumBytes());
237#else
238 ::memcpy(splitsTensorData.data(), tfLiteSplitsTensor.data.data, splitsTensorInfo.GetNumBytes());
239#endif
240
241 unsigned int index = 0;
242 unsigned int inferredIndex = 0;
243 int numberOfInferred = 0;
244 int splitSum = 0;
245
246 for (auto splitData : splitsTensorData)
247 {
248 if (splitData < 0)
249 {
250 ++numberOfInferred;
251 inferredIndex = index;
252 }
253 else
254 {
255 splitSum += splitData;
256 }
257 ++index;
258 }
259
260 // Check for inferred axis
261 if (numberOfInferred == 0)
262 {
263 if (splitSum != armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]))
264 {
265 TF_LITE_MAYBE_KERNEL_LOG(
266 tfLiteContext, "TfLiteArmnnDelegate: SplitV split_sizes does not sum to the dimension of value along"
267 " split_dim in node #%d", nodeIndex);
268 return kTfLiteError;
269 }
270 }
271 else if (numberOfInferred == 1)
272 {
273 splitsTensorData[inferredIndex] = armnn::numeric_cast<int>(inputTensorInfo.GetShape()[splitDim]) - splitSum;
274 }
275 else
276 {
277 TF_LITE_MAYBE_KERNEL_LOG(
278 tfLiteContext, "TfLiteArmnnDelegate: SplitV cannot infer split size for more than one split in node #%d",
279 nodeIndex);
280 return kTfLiteError;
281 }
282
283 armnn::SplitterDescriptor splitDescriptor(numSplits, inputDimSize);
284 unsigned int accumSplit = 0;
285 for (unsigned int j = 0; j < numSplits; ++j)
286 {
287 unsigned int splitSize = armnn::numeric_cast<unsigned int>(splitsTensorData[j]);
288
289 // Set the size of the views.
290 for (unsigned int dimIdx = 0; dimIdx < inputTensorInfo.GetNumDimensions(); ++dimIdx)
291 {
292 unsigned int dimSize = inputTensorInfo.GetShape()[dimIdx];
293 if (dimIdx == splitDim)
294 {
295 dimSize = splitSize;
296 }
297 splitDescriptor.SetViewSize(j, dimIdx, dimSize);
298 }
299
300 splitDescriptor.SetViewOriginCoord(j, splitDim, accumSplit);
301 accumSplit += splitSize;
302 }
303
304 if (!delegateData.m_Network)
305 {
306 // Check if supported
307 bool isSupported = false;
308 FORWARD_LAYER_SUPPORT_FUNC(__func__,
309 tfLiteContext,
310 IsSplitterSupported,
311 delegateData.m_Backends,
312 isSupported,
313 inputTensorInfo,
314 outputTensorInfos,
315 splitDescriptor);
316 return isSupported ? kTfLiteOk : kTfLiteError;
317 }
318
319 armnn::IConnectableLayer* layer = delegateData.m_Network->AddSplitterLayer(splitDescriptor);
320 ARMNN_ASSERT(layer != nullptr);
321
322 for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
323 {
324 layer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
325 }
326
327 // Connect
328 return Connect(layer, tfLiteNode, delegateData);
329}
330
331} // namespace armnnDelegate