blob: 7e02de1bdf61ba1b7ef9e355e0f2bdb7e4e8309b [file] [log] [blame]
Sadik Armagan62483be2020-10-23 17:14:43 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Finn Williams6f9f9902020-11-13 13:23:15 +00008#include <armnn/utility/IgnoreUnused.hpp>
9
Sadik Armagan62483be2020-10-23 17:14:43 +010010#include <tensorflow/lite/builtin_ops.h>
11#include <tensorflow/lite/c/builtin_op_data.h>
12#include <tensorflow/lite/c/common.h>
Matthew Sloyan91c41712020-11-13 09:47:35 +000013#include <tensorflow/lite/kernels/internal/tensor_ctypes.h>
Sadik Armagan62483be2020-10-23 17:14:43 +010014#include <tensorflow/lite/minimal_logging.h>
15
#include <algorithm>
#include <iterator>
#include <set>
#include <string>
#include <vector>
20
Sadik Armagan62483be2020-10-23 17:14:43 +010021namespace armnnDelegate
22{
23
Matthew Sloyan91c41712020-11-13 09:47:35 +000024void SetupConcatViewOrigin(const armnn::TensorInfo& inputTensorInfo,
25 armnn::OriginsDescriptor& concatDescriptor,
26 const unsigned int concatAxis,
27 unsigned int inputIndex,
28 unsigned int& mergeDimOrigin)
29{
30 const uint32_t inputRank = concatDescriptor.GetNumDimensions();
31
32 // double check dimensions of the tensors
33 if (inputTensorInfo.GetNumDimensions() != inputRank)
34 {
35 throw armnn::ParseException("The number of dimensions for input tensors "
36 "of the concatenation operator should be: " + std::to_string(inputRank));
37 }
38
39 for (unsigned int j = 0; j < concatAxis; ++j)
40 {
41 concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
42 }
43
44 concatDescriptor.SetViewOriginCoord(inputIndex, concatAxis, mergeDimOrigin);
45 mergeDimOrigin += inputTensorInfo.GetShape()[concatAxis];
46
47 for (unsigned int j = concatAxis + 1; j < inputRank; ++j)
48 {
49 concatDescriptor.SetViewOriginCoord(inputIndex, j, 0);
50 }
51}
52
53TfLiteStatus VisitConcatenationOperator(DelegateData& delegateData,
54 TfLiteContext* tfLiteContext,
55 TfLiteNode* tfLiteNode,
56 int nodeIndex,
57 int32_t tfLiteConcatOperatorCode)
58{
59 unsigned int numInputs = tfLiteNode->inputs->size;
60 if (numInputs < 2)
61 {
62 TF_LITE_MAYBE_KERNEL_LOG(
63 tfLiteContext, "TfLiteArmnnDelegate: Minimum number of inputs (%d != %d) in node #%d",
64 2, numInputs, nodeIndex);
65 return kTfLiteError;
66 }
67 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
68
69 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
70
71 std::vector<armnn::TensorInfo> inputTensorInfos;
72 for (unsigned int i = 0; i < numInputs; ++i)
73 {
74 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[i]];
Sadik Armagan529195f2022-01-14 12:56:35 +000075 if (!IsValid(tfLiteContext, tfLiteInputTensor, tfLiteConcatOperatorCode, nodeIndex))
Matthew Sloyan91c41712020-11-13 09:47:35 +000076 {
Matthew Sloyan91c41712020-11-13 09:47:35 +000077 return kTfLiteError;
78 }
79
80 armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
81 inputTensorInfos.emplace_back(inputTensorInfo);
82 }
83
84 // Convert input tensors to const armnn::TensorInfo* type for FORWARD_LAYER_SUPPORT_FUNC.
85 std::vector<const armnn::TensorInfo*> inputConstTensorInfos;
86 std::transform(inputTensorInfos.begin(),
87 inputTensorInfos.end(),
88 std::back_inserter(inputConstTensorInfos),
89 [](armnn::TensorInfo& t)->const armnn::TensorInfo*{ return &t; });
90
91 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
Sadik Armagan529195f2022-01-14 12:56:35 +000092 if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteConcatOperatorCode, nodeIndex))
Matthew Sloyan91c41712020-11-13 09:47:35 +000093 {
Matthew Sloyan91c41712020-11-13 09:47:35 +000094 return kTfLiteError;
95 }
96
97 // Setup OriginsDescriptor, axis and view origin
98 unsigned int numConcatView = static_cast<unsigned int>(numInputs);
99 uint32_t inputRank = tfLiteTensors[tfLiteNode->inputs->data[0]].dims->size;
100
101 auto* concatenationParameters = reinterpret_cast<TfLiteConcatenationParams*>(tfLiteNode->builtin_data);
102 const unsigned int concatDimInput = static_cast<unsigned int>(
103 (static_cast<int>(inputRank) + concatenationParameters->axis) % static_cast<int>(inputRank));
104
105 armnn::OriginsDescriptor concatDescriptor(static_cast<uint32_t>(numConcatView), inputRank);
106 concatDescriptor.SetConcatAxis(concatDimInput);
107
108 unsigned int mergeDimOrigin = 0;
109 for (unsigned int viewIndex = 0; viewIndex < numConcatView; ++viewIndex)
110 {
111 armnn::TensorInfo inputTensorInfo = GetTensorInfoForTfLiteTensor(
112 tfLiteTensors[tfLiteNode->inputs->data[viewIndex]]);
113
114 // Sets up concatDescriptor view origin
115 SetupConcatViewOrigin(inputTensorInfo, concatDescriptor, concatDimInput, viewIndex, mergeDimOrigin);
116 }
117
118 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
119
120 // Check if supported
121 bool isSupported = false;
122 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
123 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000124 FORWARD_LAYER_SUPPORT_FUNC("CONCATENATION",
Matthew Sloyan91c41712020-11-13 09:47:35 +0000125 tfLiteContext,
126 IsConcatSupported,
127 delegateData.m_Backends,
128 isSupported,
129 inputConstTensorInfos,
130 outputTensorInfo,
131 concatDescriptor);
132 };
133
134 if (!delegateData.m_Network)
135 {
136 validateFunc(outputTensorInfo, isSupported);
137 return isSupported ? kTfLiteOk : kTfLiteError;
138 }
139
140 // Setup layer and connect.
141 armnn::IConnectableLayer* concatenationLayer = delegateData.m_Network->AddConcatLayer(concatDescriptor);
142 ARMNN_ASSERT(concatenationLayer != nullptr);
143
Sadik Armagan529195f2022-01-14 12:56:35 +0000144 // Connect the Constant Inputs
145 auto inputsTensorsProcess = ProcessInputs(concatenationLayer,
146 delegateData,
147 tfLiteContext,
148 tfLiteNode);
149 if (inputsTensorsProcess == kTfLiteError)
150 {
151 return inputsTensorsProcess;
152 }
153
Matthew Sloyan91c41712020-11-13 09:47:35 +0000154 armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
155 outputSlot.SetTensorInfo(outputTensorInfo);
156 Connect(concatenationLayer, tfLiteNode, delegateData);
157
158 if (!concatenationParameters)
159 {
160 // No Activation
161 return kTfLiteOk;
162 }
163
164 // Check activation
165 TfLiteFusedActivation activationType = concatenationParameters->activation;
166 return FusedActivation(tfLiteContext, tfLiteNode, activationType, concatenationLayer, 0, delegateData);
167}
168
169TfLiteStatus VisitMeanOperator(DelegateData& delegateData,
170 TfLiteContext* tfLiteContext,
171 TfLiteNode* tfLiteNode,
172 int nodeIndex,
173 int32_t tfLiteMeanOperatorCode)
174{
175 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
176 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
177
178 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
179 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
180 if(!IsValid(&tfLiteInputTensor))
181 {
182 TF_LITE_MAYBE_KERNEL_LOG(
183 tfLiteContext,
184 "TfLiteArmnnDelegate: Invalid input tensor in operator #%d node #%d: ",
185 tfLiteMeanOperatorCode, nodeIndex);
186 return kTfLiteError;
187 }
188 if (IsDynamicTensor(tfLiteInputTensor))
189 {
190 TF_LITE_MAYBE_KERNEL_LOG(
191 tfLiteContext,
192 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
193 tfLiteMeanOperatorCode, nodeIndex);
194 return kTfLiteError;
195 }
196
197 const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
198 if(!IsValid(&tfLiteAxisTensor))
199 {
200 TF_LITE_MAYBE_KERNEL_LOG(
201 tfLiteContext,
202 "TfLiteArmnnDelegate: Invalid axis tensor in operator #%d node #%d: ",
203 tfLiteMeanOperatorCode, nodeIndex);
204 return kTfLiteError;
205 }
206 if (IsDynamicTensor(tfLiteAxisTensor))
207 {
208 TF_LITE_MAYBE_KERNEL_LOG(
209 tfLiteContext,
210 "TfLiteArmnnDelegate: Dynamic axis tensors are not supported in operator #%d node #%d: ",
211 tfLiteMeanOperatorCode, nodeIndex);
212 return kTfLiteError;
213 }
214
215 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
216 if(!IsValid(&tfLiteOutputTensor))
217 {
218 TF_LITE_MAYBE_KERNEL_LOG(
219 tfLiteContext,
220 "TfLiteArmnnDelegate: Invalid output tensor in operator #%d node #%d: ",
221 tfLiteAxisTensor, nodeIndex);
222 return kTfLiteError;
223 }
224 if (IsDynamicTensor(tfLiteOutputTensor))
225 {
226 TF_LITE_MAYBE_KERNEL_LOG(
227 tfLiteContext,
228 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
229 tfLiteMeanOperatorCode, nodeIndex);
230 return kTfLiteError;
231 }
232
233 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
234 const armnn::TensorInfo& axisTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteAxisTensor);
235 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
236
237 auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
238
239 std::vector<int32_t> axis;
240 // Add axis data to vector to be converter to unsigned int and assigned to descriptor axis.
241 for (unsigned int i = 0; i < axisTensorInfo.GetNumElements(); ++i)
242 {
243 axis.emplace_back(axisTensorData[i]);
244 }
245
246 // Convert the axis to unsigned int and remove duplicates.
247 unsigned int rank = inputTensorInfo.GetNumDimensions();
248 std::set<unsigned int> uniqueAxis;
249 std::transform(axis.begin(),
250 axis.end(),
251 std::inserter(uniqueAxis, uniqueAxis.begin()),
252 [rank](int i)->unsigned int{ return (i + rank) % rank; });
253
254 // Setup MeanDescriptor and assign axis and keepDims
255 armnn::MeanDescriptor desc;
256 desc.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
257 desc.m_KeepDims = inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ? true : false;
258
259 // Check if supported
260 bool isSupported = false;
261 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
262 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000263 FORWARD_LAYER_SUPPORT_FUNC("MEAN",
Matthew Sloyan91c41712020-11-13 09:47:35 +0000264 tfLiteContext,
265 IsMeanSupported,
266 delegateData.m_Backends,
267 isSupported,
268 inputTensorInfo,
269 outputTensorInfo,
270 desc);
271 };
272
273 if (!delegateData.m_Network)
274 {
275 validateFunc(outputTensorInfo, isSupported);
276 return isSupported ? kTfLiteOk : kTfLiteError;
277 }
278
279 // Setup layer and connect.
280 armnn::IConnectableLayer* meanLayer = delegateData.m_Network->AddMeanLayer(desc);
281 ARMNN_ASSERT(meanLayer != nullptr);
282
283 armnn::IOutputSlot& outputSlot = meanLayer->GetOutputSlot(0);
284 outputSlot.SetTensorInfo(outputTensorInfo);
285 return Connect(meanLayer, tfLiteNode, delegateData);
286}
287
Sadik Armagan62483be2020-10-23 17:14:43 +0100288TfLiteStatus VisitControlOperator(DelegateData& delegateData,
289 TfLiteContext* tfLiteContext,
290 TfLiteNode* tfLiteNode,
291 int nodeIndex,
Matthew Sloyan91c41712020-11-13 09:47:35 +0000292 int32_t operatorCode)
Sadik Armagan62483be2020-10-23 17:14:43 +0100293{
Finn Williams6f9f9902020-11-13 13:23:15 +0000294 armnn::IgnoreUnused(delegateData,
295 tfLiteContext,
296 tfLiteNode,
297 nodeIndex,
Matthew Sloyan91c41712020-11-13 09:47:35 +0000298 operatorCode);
299
300 switch(operatorCode)
301 {
302 case kTfLiteBuiltinConcatenation:
303 return VisitConcatenationOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
304 case kTfLiteBuiltinMean:
305 return VisitMeanOperator(delegateData, tfLiteContext, tfLiteNode, nodeIndex, operatorCode);
306 default:
307 return kTfLiteError;
308 }
Sadik Armagan62483be2020-10-23 17:14:43 +0100309}
310
311} // namespace armnnDelegate