//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <DelegateUtils.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
#include <tensorflow/lite/minimal_logging.h>

#include <algorithm>
#include <iterator>
#include <set>
#include <string>
#include <vector>

namespace armnnDelegate
{

TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
                                        TfLiteContext* tfLiteContext,
                                        TfLiteNode* tfLiteNode,
                                        int nodeIndex,
                                        int32_t tfLiteConcatOperatorCode)
{
    unsigned int numInputs = tfLiteNode->inputs->size;
    if (numInputs < 2)
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d) was not met with %d input(s) in node #%d",
            2, numInputs, nodeIndex);
        return kTfLiteError;
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;

    std::vector<armnn::TensorInfo> inputTensorInfos;
    for (unsigned int i = 0; i < numInputs; ++i)
    {
        const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
        if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteConcatOperatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
        inputTensorInfos.emplace_back(inputTensorInfo);
    }

    // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
    std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
    std::transform(inputTensorInfos.begin(),
                   inputTensorInfos.end(),
                   std::back_inserter(inputConstTensorInfos),
                   [](armnn::TensorInfo& t) -> const armnn::TensorInfo* { return &t; });

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Setup OriginsDescriptor, axis and view origin
    unsigned int numConcatView = static_cast<unsigned int>(numInputs);
    uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;

    auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);

    if (!concatenationParameters)
    {
        throw armnn::Exception("TfLiteArmnnDelegate: Concat parameters are null in node #" +
                               std::to_string(nodeIndex));
    }

    const unsigned int concatDimInput = static_cast<unsigned int>(
        (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));

    armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
    concatDescriptor.SetConcatAxis(concatDimInput);

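    // Each input becomes a view of the output; mergeDimOrigin tracks the running offset along the
    // concatenation axis so every view origin is placed directly after the previous input.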
    unsigned int mergeDimOrigin = 0;
    for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
    {
        armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(
            tfLiteTensors[tfLiteNode->inputs->data[viewIndex]]);

        // Sets up concatDescriptor view origin
        SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
    }

    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    // Verify we support the fused activation before attempting to create a layer
    TfLiteFusedActivation activationType = concatenationParameters->activation;

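    // The fused activation runs on the concat result, so the output tensor info is passed
    // as both the activation's input and output when checking support.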
    TfLiteStatus activationStatus = ValidateFusedActivationOperator(delegateData, tfLiteContext, outputTensorInfo,
                                                                    outputTensorInfo, activationType);
    if (activationStatus != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
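    // The support check below populates setBackend with the backend that accepted the layer.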
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
                                   tfLiteContext,
                                   IsConcatSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputConstTensorInfos,
                                   outputTensorInfo,
                                   concatDescriptor);
    };

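    // With no network to build, the delegate is only being asked whether this node is supported.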
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Setup layer and connect.
    auto layerName = GetLayerName(armnn::LayerType::Concat, nodeIndex);
    armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor,
                                                                                          layerName.c_str());
    ARMNN_ASSERT(concatenationLayer != nullptr);
    concatenationLayer->SetBackendId(setBackend);

    // Connect the Constant Inputs
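    // ProcessInputs hooks up any constant TFLite inputs of this node, adding constant layers where needed.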
    auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);
    if (Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    if (activationType == kTfLiteActNone)
    {
        // No Activation
        return kTfLiteOk;
    }

    // Check and Create activation
    return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData, nodeIndex);
}

TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
                               TfLiteContext* tfLiteContext,
                               TfLiteNode* tfLiteNode,
                               int nodeIndex,
                               int32_t tfLiteMeanOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (!IsValid(&tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
    if (!IsValid(&tfLiteAxisTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid axis tensor in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteAxisTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic axis tensors are not supported in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (!IsValid(&tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteMeanOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

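    // The second input is the axis tensor: int32 values listing the dimensions to reduce over.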
    auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);

    std::vector<int32_t> axis;
    // Add axis data to the vector to be converted to unsigned int and assigned to the descriptor axis.
    for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
    {
        axis.emplace_back(axisTensorData[i]);
    }

    // Convert the axis to unsigned int and remove duplicates.
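    // A negative axis counts back from the last dimension; (i + rank) % rank maps it into [0, rank).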
    unsigned int rank = inputTensorInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(),
                   axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Setup MeanDescriptor and assign axis and keepDims
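    // keepDims is inferred by comparing ranks: if the output rank matches the input rank,
    // the reduced dimensions were kept as size 1.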
    armnn::MeanDescriptor desc;
    desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions();

    // Check if supported
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("MEAN",
                                   tfLiteContext,
                                   IsMeanSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   desc);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Setup layer and connect.
    auto layerName = GetLayerName(armnn::LayerType::Mean, nodeIndex);
    armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc, layerName.c_str());
    ARMNN_ASSERT(meanLayer != nullptr);
    meanLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs if there are any
    if (ProcessInputs(meanLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(meanLayer, tfLiteNode, delegateData);
}

TfLiteStatus VisitControlOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
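    // Dispatch to the visitor for the specific control operator; anything else is rejected.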
    switch (operatorCode)
    {
        case kTfLiteBuiltinConcatenation:
            return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        case kTfLiteBuiltinMean:
            return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
        default:
            return kTfLiteError;
    }
}

} // namespace armnnDelegate