blob: 7876b7b39830b86df8891a897d357018488d3552 [file] [log] [blame]
//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
Teresa Charlin86b03572023-04-28 13:19:12 +01005
6#pragma once
7
8#include <OpaqueDelegateUtils.hpp>
9
10namespace armnnOpaqueDelegate
11{
12
13TfLiteStatus VisitSliceOperator(DelegateData& delegateData,
14 TfLiteOpaqueContext* tfLiteContext,
15 TfLiteOpaqueNode* tfLiteNode,
16 int nodeIndex,
17 int32_t tfLiteSliceOperatorCode)
18{
19
20 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 3, nodeIndex));
21 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
22
23 // Read inputs [input, begin, size]
24 // Gather input indices and use to get input tensor.
25 const int* inputTensors;
26 int numInputs;
27 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
28 {
29 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
30 tfLiteContext,
31 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
32 nodeIndex);
33 return kTfLiteError;
34 }
35
36 std::vector<const TfLiteOpaqueTensor*> tfLiteInputTensors;
37 tfLiteInputTensors.reserve(numInputs);
38 for (int i = 0; i < numInputs; i++)
39 {
40 const TfLiteOpaqueTensor* inputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[i]);
41 tfLiteInputTensors.push_back(inputTensor);
42 if (!IsValid(tfLiteContext, inputTensor, tfLiteSliceOperatorCode, nodeIndex))
43 {
44 return kTfLiteError;
45 }
46 }
47
48 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensors[0]);
49
50 // We save the begin and size tensors in our descriptor. Therefore we have to read those values from inputs
51 unsigned int inputRank = inputTensorInfo.GetNumDimensions();
Mike Kelly04f71202023-05-05 15:35:18 +010052 auto ReadInt32Input = [&](int inputIndex, std::vector<int32_t>& outputData, const char* name) -> TfLiteStatus
Teresa Charlin86b03572023-04-28 13:19:12 +010053 {
54 if (TfLiteOpaqueTensorType(tfLiteInputTensors[inputIndex]) != kTfLiteInt32)
55 {
56 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
57 tfLiteContext,
Mike Kelly04f71202023-05-05 15:35:18 +010058 "TfLiteArmnnOpaqueDelegate: The %s Tensor of the Slice operation needs to "
Teresa Charlin86b03572023-04-28 13:19:12 +010059 "be of type int32. Operator: #%d node #%d: ",
Mike Kelly04f71202023-05-05 15:35:18 +010060 name, tfLiteSliceOperatorCode, nodeIndex);
Teresa Charlin86b03572023-04-28 13:19:12 +010061 return kTfLiteError;
62 }
63 uint32_t rank = TfLiteOpaqueTensorNumDims(tfLiteInputTensors[inputIndex]);
64 if (rank != 1)
65 {
66 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
67 tfLiteContext,
Mike Kelly04f71202023-05-05 15:35:18 +010068 "TfLiteArmnnOpaqueDelegate: The %s Tensor of the Slice operation needs to "
Teresa Charlin86b03572023-04-28 13:19:12 +010069 "be a 1D-Tensor. Operator: #%d node #%d: ",
Mike Kelly04f71202023-05-05 15:35:18 +010070 name, tfLiteSliceOperatorCode, nodeIndex);
Teresa Charlin86b03572023-04-28 13:19:12 +010071 return kTfLiteError;
72 }
73 uint32_t numValues = TfLiteOpaqueTensorDim(tfLiteInputTensors[inputIndex], 0);
74 if (numValues != inputRank)
75 {
76 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
77 tfLiteContext,
Mike Kelly04f71202023-05-05 15:35:18 +010078 "TfLiteArmnnOpaqueDelegate: The number of values in the %s Tensor of the "
79 "Slice operation needs to be equal to the rank of the Input Tensor. Operator: #%d node #%d: ",
80 name, tfLiteSliceOperatorCode, nodeIndex);
Teresa Charlin86b03572023-04-28 13:19:12 +010081 return kTfLiteError;
82 }
83 // return tensor data
Mike Kelly04f71202023-05-05 15:35:18 +010084 auto* tensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteInputTensors[inputIndex]));
Teresa Charlin86b03572023-04-28 13:19:12 +010085 outputData.assign(tensorDataPtr, tensorDataPtr + numValues);
86 return kTfLiteOk;
87 };
88
Mike Kelly04f71202023-05-05 15:35:18 +010089 std::vector<int32_t> signedBegin;
90 if (ReadInt32Input(1, signedBegin, "Begin") != kTfLiteOk)
91 {
Teresa Charlin86b03572023-04-28 13:19:12 +010092 return kTfLiteError;
Mike Kelly04f71202023-05-05 15:35:18 +010093 }
94
95 std::vector<int32_t> signedSize;
96 if (ReadInt32Input(2, signedSize, "Size") != kTfLiteOk)
97 {
Teresa Charlin86b03572023-04-28 13:19:12 +010098 return kTfLiteError;
Mike Kelly04f71202023-05-05 15:35:18 +010099 }
100
101 std::vector<uint32_t> begin({ signedBegin.begin(), signedBegin.end() });
102 std::vector<uint32_t> size(signedSize.size());
103
104 for (unsigned int i = 0; i < signedSize.size(); ++i)
105 {
106 int signedValue = signedSize[i];
107 if (signedValue < -1 || signedValue > TfLiteOpaqueTensorDim(tfLiteInputTensors[0], i) - signedBegin[i])
108 {
109 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
110 tfLiteContext,
111 "TfLiteArmnnDelegate: Invalid value for Size. Size must be in range [-1, inputDimSize - begin] "
112 "[-1, %d] inclusive but was %d Operator: #%d node #%d: ",
113 TfLiteOpaqueTensorDim(tfLiteInputTensors[0], i) - signedBegin[i], signedValue,
114 tfLiteSliceOperatorCode, nodeIndex);
115 return kTfLiteError;
116 }
117 if (signedValue == -1)
118 {
119 size[i] = TfLiteOpaqueTensorDim(tfLiteInputTensors[0], i) - signedBegin[i];
120 }
121 else
122 {
123 size[i] = static_cast<uint32_t>(signedValue);
124 }
125 }
Teresa Charlin86b03572023-04-28 13:19:12 +0100126
127 // Write all data to the descriptor
128 armnn::SliceDescriptor descriptor(begin, size);
129
130 // Validate output
131 // Gather output indices and use to get output tensor.
132 const int* outputTensors;
133 int numOutputs;
134 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
135 {
136 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
137 tfLiteContext,
138 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
139 nodeIndex);
140 return kTfLiteError;
141 }
142
143 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
144 if (!IsValid(tfLiteContext, tfLiteOutputTensor, tfLiteSliceOperatorCode, nodeIndex))
145 {
146 return kTfLiteError;
147 }
148
149 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
150
151 bool isSupported = false;
152 armnn::BackendId setBackend;
153 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
154 {
155 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SLICE",
156 tfLiteContext,
157 IsSliceSupported,
158 delegateData.m_Backends,
159 isSupported,
160 setBackend,
161 inputTensorInfo,
162 outInfo,
163 descriptor);
164 };
165
166 if (!delegateData.m_Network)
167 {
168 validateFunc(outputTensorInfo, isSupported);
169 return isSupported ? kTfLiteOk : kTfLiteError;
170 }
171
172 // Add a Slice layer
Mike Kellya2806502023-08-03 10:42:11 +0100173 auto layerName = GetName(armnn::LayerType::Slice, nodeIndex);
Mike Kelly04f71202023-05-05 15:35:18 +0100174 armnn::IConnectableLayer* layer = delegateData.m_Network->AddSliceLayer(descriptor, layerName.c_str());
Teresa Charlin86b03572023-04-28 13:19:12 +0100175 layer->SetBackendId(setBackend);
176 ARMNN_ASSERT(layer != nullptr);
177
178 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
179 outputSlot.SetTensorInfo(outputTensorInfo);
180
181 // try to connect the Constant Inputs if there are any
Mike Kellya2806502023-08-03 10:42:11 +0100182 if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
Teresa Charlin86b03572023-04-28 13:19:12 +0100183 {
184 return kTfLiteError;
185 }
186
187 // Connect
188 return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
189}
190
191} // namespace armnnOpaqueDelegate
192