//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

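// Validates and, on the network-building pass, adds a CAST layer for the given TfLite node.
// When delegateData.m_Network is a nullptr this acts as a capability check only; otherwise
// the layer is created, its backend assigned, and its slots connected into the Arm NN network.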
TfLiteStatus VisitCastOperator(DelegateData& delegateData,
                               TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               int nodeIndex,
                               int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    int numInputs = 0;
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // This layer has exactly one input, so we can directly take inputTensors[0] as the opaque tensor.
    const TfLiteOpaqueTensor* tfLiteInputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // This layer has exactly one output, so we can directly take outputTensors[0] as the opaque tensor.
    const TfLiteOpaqueTensor* tfLiteOutputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo  = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CAST",
                                          tfLiteContext,
                                          IsCastSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outInfo);
    };

    // If m_Network is a nullptr, we are in the validation phase: a prerequisite TfLite callback is checking
    // whether the operator is supported. If it is, VisitCastOperator will be called again to add the layer
    // to the network, as seen further below.
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    // Add a Cast layer
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddCastLayer();
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any.
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

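// Validates and, on the network-building pass, adds a RESHAPE layer for the given TfLite node.
// The target shape can come either from builtin TfLiteReshapeParams or from an optional second
// input tensor; both sources are handled below.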
TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                  TfLiteOpaqueContext* tfLiteContext,
                                  TfLiteOpaqueNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
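    // Reshape accepts either one input (data only, with the shape taken from builtin options)
    // or two inputs (data plus a 1-D shape tensor), so both arities are validated here.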
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);

    if (numInputs == 2)
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    }
    else
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    armnn::ReshapeDescriptor reshapeDesc;
    std::vector<int32_t> targetShape;

    auto* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));

    // The new shape can be defined either by a second input tensor or by a builtin option; we need to check for
    // both. Options might be set without valid data, so we also need to check that the dimensions are in a
    // valid range.
    if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
    {
        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
        {
            targetShape.push_back(reshapeOptions->shape[i]);
        }
    }
    else if (numInputs == 2)
    {
        // Get the shape from the second input tensor.
        const TfLiteOpaqueTensor* tfLiteShapeInputTensor =
                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
        if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
        {
            return kTfLiteError;
        }

        int32_t numDims = TfLiteOpaqueTensorNumDims(tfLiteShapeInputTensor);
        if (numDims != 1)
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                    tfLiteContext,
                    "TfLiteArmnnOpaqueDelegate: Target 'shape' input is not a 1D tensor in "
                    "operator #%d node #%d: Falling back to TfLiteOptions.",
                    operatorCode, nodeIndex);
        }
        else
        {
            // Get the shape data out of the input tensor.
            auto* shapeTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeInputTensor));
            int32_t shapeTensorNumValues = TfLiteOpaqueTensorDim(tfLiteShapeInputTensor, 0);
            for (int32_t i = 0; i < shapeTensorNumValues; ++i)
            {
                targetShape.push_back(shapeTensorDataPtr[i]);
            }
        }
    }
    else
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Target shape not defined in reshape parameters or input tensor. "
                "At least one method required in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

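    // For example, an input with 2x3x4 = 24 elements and a targetShape of {6, -1} resolves
    // the single -1 wildcard to 24 / 6 = 4, producing an output shape of [6, 4].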
    // Use the data to create the required tensor shape.
    if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: At most one component of shape can be -1 in: "
                "operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

    if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Reshape, number of elements in output shape does not match input "
                "operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
                                          tfLiteContext,
                                          IsReshapeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo0,
                                          outInfo,
                                          reshapeDesc);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
    ARMNN_ASSERT(layer != nullptr);
    layer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the Constant Inputs if there are any.
    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    // Connect
    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate