//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <DelegateUtils.hpp>
#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
                                        TfLiteOpaqueContext* tfLiteContext,
                                        TfLiteOpaqueNode* tfLiteNode,
                                        int nodeIndex,
                                        int32_t tfLiteConcatOperatorCode)
{
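    // CONCATENATION takes a variable number of inputs, so only a minimum input count can be enforced here.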
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not met (%d given) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather the input indices and use them to get the input tensors.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    std::vector<armnn::TensorInfo> inputTensorInfos;
    for (int i = 0; i < numInputs; ++i)
    {
        const TfLiteOpaqueTensor* inputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[i]);
        if (!IsValid(tfLiteContext, inputTensor, tfLiteConcatOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(inputTensor);
        inputTensorInfos.emplace_back(inputTensorInfo);
    }

    // Convert the input tensor infos to const armnn::TensorInfo* for FORWARD_LAYER_OPAQUE_SUPPORT_FUNC.
    std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
    std::transform(inputTensorInfos.begin(),
                   inputTensorInfos.end(),
                   std::back_inserter(inputConstTensorInfos),
                   [](armnn::TensorInfo& t) -> const armnn::TensorInfo* { return &t; });

    // Gather the output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Set up the OriginsDescriptor, axis and view origins.
    auto numConcatView = static_cast<unsigned int>(numInputs);
    uint32_t inputRank = TfLiteOpaqueTensorNumDims(TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]));

    auto* concatenationParameters =
        reinterpret_cast<TfLiteConcatenationParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    if (!concatenationParameters)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Concat parameters are null in node #" +
                               std::to_string(nodeIndex));
    }

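    // Normalise the concat axis: TfLite allows negative axis values, which count back from the last dimension.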
    const auto concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));

    armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    unsigned int mergeDimOrigin = 0;
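    // Each view's origin along the concat axis is the running total of the extents of the preceding inputs,
    // e.g. concatenating two [1,2,3] tensors on axis 2 gives view origins {0,0,0} and {0,0,3}.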
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[viewIndex]));

        // Set up the concatDescriptor view origin.
        SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    // Verify we support the fused activation before attempting to create a layer.
    TfLiteFusedActivation activationType = concatenationParameters->activation;

    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                    outputTensorInfo, activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check if the layer is supported.
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONCATENATION",
                                          tfLiteContext,
                                          IsConcatSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputConstTensorInfos,
                                          outputTensorInfo,
                                          concatDescriptor);
    };

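    // With no network to build, the delegate is only checking whether the operator is supported.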
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Set up the layer and connect it.
    armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
    ARMNN_ASSERT(concatenationLayer != nullptr);
    concatenationLayer->SetBackendId(setBackend);

    // Connect the constant inputs.
    auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    if (Connect(concatenationLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (activationType == kTfLiteActNone)
    {
        // No activation to fuse.
        return kTfLiteOk;
    }

    // Check for and create the fused activation.
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
}

TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
                               TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               int nodeIndex,
                               int32_t tfLiteMeanOperatorCode)
{
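    // MEAN takes exactly two inputs: the data tensor and the reduction-axes tensor.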
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather the input indices and use them to get the input tensor.
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteMeanOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use the input indices to get the axis tensor.
    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteMeanOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather the output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
            nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteMeanOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

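    // The TfLite reduction-axes input is an int32 tensor.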
    auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));

    std::vector<int32_t> axis;
    // Add the axis data to a vector to be converted to unsigned int and assigned to the descriptor axis.
    for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
    {
        axis.emplace_back(axisTensorData[i]);
    }

    // Convert the axis values to unsigned int and remove duplicates.
    unsigned int rank = inputTensorInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
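    // (i + rank) % rank maps negative TfLite axis values onto their positive equivalents.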
    std::transform(axis.begin(),
                   axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Set up the MeanDescriptor; keepDims is inferred from matching input and output ranks.
    armnn::MeanDescriptor desc;
    desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();

    // Check if the layer is supported.
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("MEAN",
                                          tfLiteContext,
                                          IsMeanSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          desc);
    };

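    // With no network to build, the delegate is only checking whether the operator is supported.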
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Set up the layer and connect it.
    armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
    ARMNN_ASSERT(meanLayer != nullptr);
    meanLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs if there are any.
    if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(meanLayer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitControlOperator(DelegateData& delegateData,
                                  TfLiteOpaqueContext* tfLiteContext,
                                  TfLiteOpaqueNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
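    // Dispatch to the visitor for the specific control operator; anything unhandled is rejected.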
    switch (operatorCode)
    {
        case kTfLiteBuiltinConcatenation:
            return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinMean:
            return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnOpaqueDelegate