//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

13TfLiteStatus VisitUnpackOperator(DelegateData& delegateData,
14 TfLiteOpaqueContext* tfLiteContext,
15 TfLiteOpaqueNode* tfLiteNode,
16 int nodeIndex,
17 int32_t operatorCode)
18{
19 // Check inputs
20 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
21
22 const int* inputTensors;
23 int numInputs;
24 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
25 {
26 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
27 tfLiteContext,
28 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
29 nodeIndex);
30 return kTfLiteError;
31 }
32 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
33 inputTensors[0]);
34 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
35 {
36 return kTfLiteError;
37 }
38
39 auto* tfLiteNodeParameters = reinterpret_cast<TfLiteUnpackParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
40 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
41
42 // Get Unpack Axis
43 const unsigned int unpackAxis = NonNegative(tfLiteNodeParameters->axis, nodeIndex);
44
45 if (unpackAxis >= inputTensorInfo.GetNumDimensions())
46 {
47 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
48 tfLiteContext,
49 "TfLiteArmnnOpaqueDelegate: The unpack axis #%d cannot be greater than or equal to "
50 "the number of input dimensions #%d in operator #%d node #%d",
51 unpackAxis, inputTensorInfo.GetNumDimensions(), operatorCode, nodeIndex);
52 return kTfLiteError;
53 }
54
55 // Get Unpack Num
56 unsigned int unpackNum = NonNegative(tfLiteNodeParameters->num, nodeIndex);
57
58 // If num is not defined, automatically infer from the length of the dimension axis.
59 if(unpackNum == 0)
60 {
61 unpackNum = inputTensorInfo.GetShape()[unpackAxis];
62 }
63
64 // If unpack number cannot be inferred and is still zero, return kTfLiteError.
65 if(unpackNum == 0)
66 {
67 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
68 tfLiteContext,
69 "TfLiteArmnnOpaqueDelegate: Number to unpack must greater than zero in operator #%d node #%d: ",
70 operatorCode, nodeIndex);
71 return kTfLiteError;
72 }
73
74 // Check outputs
75 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, unpackNum, nodeIndex));
76
77 auto inputDimSize = inputTensorInfo.GetNumDimensions();
78 std::vector<unsigned int> unpackDimSizes(inputDimSize);
79
80 // Add current input shape to unpackDimSizes
81 for (unsigned int i = 0; i < inputDimSize; ++i)
82 {
83 unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
84 }
85
86 if (unpackDimSizes[unpackAxis] != unpackNum)
87 {
88 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
89 tfLiteContext,
90 "TfLiteArmnnOpaqueDelegate: Number to unpack must be the same as length "
91 "of the dimension to unpack along in operator #%d node #%d: ",
92 operatorCode, nodeIndex);
93 return kTfLiteError;
94 }
95
96 unpackDimSizes[unpackAxis] /= unpackNum;
97
98 armnn::SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
Mike Kelly363b5722023-10-11 14:25:50 +010099 splitDesc.SetAxis(unpackAxis);
100
Teresa Charlinecebb0f2023-04-27 21:37:56 +0100101 for (unsigned int j = 0; j < unpackNum; ++j)
102 {
103 // Set the size of the views.
104 for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
105 {
106 splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
107 }
108 splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
109 }
110
111 // Gather output indices and use to get output tensors.
112 const int* outputTensors;
113 int numOutputs;
114 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
115 {
116 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
117 tfLiteContext,
118 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
119 nodeIndex);
120 return kTfLiteError;
121 }
122
123 // Validate all outputs and get TensorInfo
124 std::vector<armnn::TensorInfo> outputs;
125 for (unsigned int i = 0; i < unpackNum; ++i)
126 {
127 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext,
128 outputTensors[i]);
129 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
130 {
131 return kTfLiteError;
132 }
133
134 outputs.push_back(GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true));
135 }
136
137 const std::vector<std::reference_wrapper<armnn::TensorInfo>> outputTensorInfos(outputs.begin(), outputs.end());
138
139 // Determine the shape of the Splitter layer outputs for validation
140 armnn::TensorShape splitOutShape = armnn::TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
141 unpackDimSizes.data());
142
143 std::vector<armnn::TensorInfo> splitterOutputs;
144 for (unsigned int outputIndex = 0; outputIndex < outputTensorInfos.size(); ++outputIndex)
145 {
146 splitterOutputs.push_back(armnn::TensorInfo(splitOutShape,
147 outputTensorInfos[outputIndex].get().GetDataType(),
148 outputTensorInfos[outputIndex].get().GetQuantizationScale(),
149 outputTensorInfos[outputIndex].get().GetQuantizationOffset()));
150 }
151 std::vector<std::reference_wrapper<armnn::TensorInfo>> splitterOutputTensorInfos(splitterOutputs.begin(),
152 splitterOutputs.end());
153
154 armnn::BackendId setBackendSplit;
155 if (!delegateData.m_Network)
156 {
157 // Check if splitter is supported
158 bool isSupported = false;
159 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("UNPACK",
160 tfLiteContext,
161 IsSplitterSupported,
162 delegateData.m_Backends,
163 isSupported,
164 setBackendSplit,
165 inputTensorInfo,
166 splitterOutputTensorInfos,
167 splitDesc);
168 return isSupported ? kTfLiteOk : kTfLiteError;
169 }
170
171 // Create Reshape descriptor from the first outputTensorInfo to validate a single Reshape layer
172 // Use this descriptor later when creating every ReshapeLayer as all Reshape Layers should be the same
173 armnn::ReshapeDescriptor reshapeDescriptor;
174 reshapeDescriptor.m_TargetShape = outputTensorInfos[0].get().GetShape();
175
176 armnn::BackendId setBackendReshape;
177 if (!delegateData.m_Network)
178 {
179 bool isSupported = false;
180 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
181 tfLiteContext,
182 IsReshapeSupported,
183 delegateData.m_Backends,
184 isSupported,
185 setBackendReshape,
186 splitterOutputTensorInfos[0],
187 outputTensorInfos[0],
188 reshapeDescriptor);
189 return isSupported ? kTfLiteOk : kTfLiteError;
190 };
191
Mike Kellya2806502023-08-03 10:42:11 +0100192 auto layerName = GetName(armnn::LayerType::Splitter, nodeIndex, "Unpack");
Teresa Charlinecebb0f2023-04-27 21:37:56 +0100193 armnn::IConnectableLayer* splitterLayer = delegateData.m_Network->AddSplitterLayer(splitDesc,
Mike Kellya2806502023-08-03 10:42:11 +0100194 layerName.c_str());
Teresa Charlinecebb0f2023-04-27 21:37:56 +0100195 splitterLayer->SetBackendId(setBackendSplit);
196 ARMNN_ASSERT(splitterLayer != nullptr);
197
198 for (unsigned int k = 0; k < splitterLayer->GetNumOutputSlots(); ++k)
199 {
200 splitterLayer->GetOutputSlot(k).SetTensorInfo(outputs[k]);
201 }
202
203 // Connect the input slots
204 auto inputIndex = static_cast<unsigned int>(inputTensors[0]);
205 delegateData.m_OutputSlotForNode[inputIndex]->Connect(splitterLayer->GetInputSlot(0));
206
207 // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
208 for (unsigned int outputIndex = 0; outputIndex < splitterLayer->GetNumOutputSlots(); ++outputIndex)
209 {
Mike Kellya2806502023-08-03 10:42:11 +0100210 auto reshapeLayerName = GetName(armnn::LayerType::Reshape, nodeIndex, "Unpack");
Teresa Charlinecebb0f2023-04-27 21:37:56 +0100211 armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor,
212 reshapeLayerName.c_str());
213 reshapeLayer->SetBackendId(setBackendReshape);
214 ARMNN_ASSERT(reshapeLayer != nullptr);
215
216 splitterLayer->GetOutputSlot(outputIndex).SetTensorInfo(splitterOutputTensorInfos[outputIndex]);
217 splitterLayer->GetOutputSlot(outputIndex).Connect(reshapeLayer->GetInputSlot(0));
218
219 armnn::TensorInfo outputTensorInfo = outputTensorInfos[outputIndex];
220 reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
221
222 armnn::IOutputSlot& slot = reshapeLayer->GetOutputSlot(0);
223
224 delegateData.m_OutputSlotForNode[
225 static_cast<unsigned long>(static_cast<unsigned int>(outputTensors[outputIndex]))] = &slot;
226
227 }
228
229 return kTfLiteOk;
230}

} // namespace armnnOpaqueDelegate