//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <DelegateUtils.hpp>
#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
                                        TfLiteOpaqueContext* tfLiteContext,
                                        TfLiteOpaqueNode* tfLiteNode,
                                        int nodeIndex,
                                        int32_t tfLiteConcatOperatorCode)
{
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (numInputs < 2)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Minimum number of inputs (%d) not met with %d inputs in node #%d",
                2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use to get input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    std::vector<armnn::TensorInfo> inputTensorInfos;
    for (int i = 0; i < numInputs; ++i)
    {
        const TfLiteOpaqueTensor* inputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[i]);
        if (!IsValid(tfLiteContext, inputTensor, tfLiteConcatOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(inputTensor);
        inputTensorInfos.emplace_back(inputTensorInfo);
    }

    // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_OPAQUE_SUPPORT_FUNC.
    std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
    std::transform(inputTensorInfos.begin(),
                   inputTensorInfos.end(),
                   std::back_inserter(inputConstTensorInfos),
                   [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });

    // Gather output indices and use to get output tensors.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Setup OriginsDescriptor, axis and view origin
    auto numConcatView = static_cast<unsigned int>(numInputs);
    uint32_t inputRank = TfLiteOpaqueTensorNumDims(TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]));

    auto* concatenationParameters =
            reinterpret_cast<TfLiteConcatenationParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    if (!concatenationParameters)
    {
        throw armnn::Exception("TfLiteArmnnOpaqueDelegate: Concat parameters are null in node #" +
                               std::to_string(nodeIndex));
    }

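    // TfLiteConcatenationParams::axis may be negative; wrap it into [0, inputRank) below so it
    // matches ArmNN's positive concatenation-axis convention.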
    const auto concatDimInput = static_cast<unsigned int>(
            (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));

    armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

    unsigned int mergeDimOrigin = 0;
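    // Each input tensor becomes one view of the OriginsDescriptor; mergeDimOrigin accumulates the
    // running offset of each view along the concatenation axis.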
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[viewIndex]));

        // Sets up concatDescriptor view origin
        SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    // Verify we support the fused activation before attempting to create a layer
    TfLiteFusedActivation activationType = concatenationParameters->activation;

    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                    outputTensorInfo, activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CONCATENATION",
                                          tfLiteContext,
                                          IsConcatSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputConstTensorInfos,
                                          outputTensorInfo,
                                          concatDescriptor);
    };

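    // With no network to build into, the delegate is only being asked whether this node is
    // supported, so run the backend validation and return.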
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Setup layer and connect.
    auto layerName = GetName(armnn::LayerType::Concat, nodeIndex);
    armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor,
                                                                                          layerName.c_str());
    ARMNN_ASSERT(concatenationLayer != nullptr);
    concatenationLayer->SetBackendId(setBackend);

    // Connect the Constant Inputs
    auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    if (Connect(concatenationLayer, tfLiteContext, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (activationType == kTfLiteActNone)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and Create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData, nodeIndex);
}

TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
                               TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               int nodeIndex,
                               int32_t tfLiteMeanOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use to get input tensor.
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteMeanOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Use input indices to get axis tensor.
    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (!IsValid(tfLiteContext, tfLiteAxisTensor, tfLiteMeanOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use to get output tensors.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteMeanOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteAxisTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

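    // The second input of a TfLite MEAN node is an int32 tensor listing the dimensions to reduce over.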
    auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));

    std::vector<int32_t> axis;
    // Add axis data to vector to be converted to unsigned int and assigned to descriptor axis.
    for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
    {
        axis.emplace_back(axisTensorData[i]);
    }

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputTensorInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(),
                   axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i)->unsigned int{ return (i + rank) % rank; });

    // Setup MeanDescriptor and assign axis and keepDims
    armnn::MeanDescriptor desc;
    desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
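    // keepDims is not read from the builtin data here; it is inferred from the ranks: if the output
    // has as many dimensions as the input, the reduced axes were kept.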
    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("MEAN",
                                          tfLiteContext,
                                          IsMeanSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo,
                                          desc);
    };

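    // A null network means this call is only checking backend support for the MEAN node; no layers are added.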
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Setup layer and connect.
    auto layerName = GetName(armnn::LayerType::Mean, nodeIndex);
    armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc, layerName.c_str());
    ARMNN_ASSERT(meanLayer != nullptr);
    meanLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any
    if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(meanLayer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitControlOperator(DelegateData& delegateData,
                                  TfLiteOpaqueContext* tfLiteContext,
                                  TfLiteOpaqueNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
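    // Dispatch on the TfLite builtin operator code; operators this visitor does not handle are rejected.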
    switch(operatorCode)
    {
        case kTfLiteBuiltinConcatenation:
            return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinMean:
            return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnOpaqueDelegate