//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{
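// Checks with the configured backends whether a Tile layer with the given input, output and
// descriptor is supported; no layer is added to the network.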
TfLiteStatus ValidateTileOperator(DelegateData& delegateData,
                                  TfLiteOpaqueContext* tfLiteContext,
                                  const armnn::TensorInfo& inputInfo,
                                  const armnn::TensorInfo& outputInfo,
                                  const armnn::TileDescriptor& descriptor)
{
    bool isSupported = false;
    FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("TILE",
                                      tfLiteContext,
                                      IsTileSupported,
                                      delegateData.m_Backends,
                                      isSupported,
                                      armnn::BackendId(),
                                      inputInfo,
                                      outputInfo,
                                      descriptor);
    return isSupported ? kTfLiteOk : kTfLiteError;
}

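// Parses a TFLite TILE node: validates the tensors, reads the multiples input into a TileDescriptor
// and, when a network is being built, adds the corresponding ArmNN Tile layer and connects it.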
TfLiteStatus VisitTileOperator(DelegateData& delegateData,
                               TfLiteOpaqueContext* tfLiteContext,
                               TfLiteOpaqueNode* tfLiteNode,
                               int nodeIndex,
                               int32_t tileOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input tensors
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    const int* inputTensors;
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    // Gather output tensors
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    // The input contains the data that should be tiled
    const TfLiteOpaqueTensor* tfLiteInputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
                tileOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // The multiples tensor contains the number of copies for each axis
    const TfLiteOpaqueTensor* tfLiteMultiplesTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
    if (IsDynamicTensor(tfLiteMultiplesTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
                tileOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // The output tensor
    const TfLiteOpaqueTensor* tfLiteOutputTensor =
            TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
                tileOperatorCode, nodeIndex);
        return kTfLiteError;
    }

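    // Gather the shape and data type information of each tensor as ArmNN TensorInfo,
    // used both for validation and for configuring the layer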
    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& multiplesTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteMultiplesTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    // The multiples length must be the same as the number of dimensions in the input tensor
    if (multiplesTensorInfo.GetNumElements() != inputTensorInfo.GetNumDimensions())
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: The multiples length must be the same as the number of dimensions "
                "in the input tensor in operator #%d node #%d: ",
                tileOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Get the multiples data: in ArmNN the values of the multiples input tensor are stored in the operator
    // descriptor, so we have to read them from the input tensor and write them into the descriptor
    auto* multiplesTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteMultiplesTensor));
    auto multiplesTensorNum = TfLiteOpaqueTensorDim(tfLiteMultiplesTensor, 0);
    std::vector<int32_t> multiplesIntData(multiplesTensorDataPtr, multiplesTensorDataPtr + multiplesTensorNum);

    // The multiples must be non-negative
    for (auto multiple : multiplesIntData)
    {
        if (multiple < 0)
        {
            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                    tfLiteContext,
                    "TfLiteArmnnOpaqueDelegate: The multiples must be non-negative values in operator #%d node #%d: ",
                    tileOperatorCode, nodeIndex);
            return kTfLiteError;
        }
    }

    // The original input from TfLite is int32; convert it to uint32 for the descriptor
    std::vector<uint32_t> multiplesUintData;
    std::transform(multiplesIntData.begin(),
                   multiplesIntData.end(),
                   std::back_inserter(multiplesUintData),
                   [] (const int value)
                   {
                       return static_cast<uint32_t>(value);
                   });

    armnn::TileDescriptor tileDescriptor;
    tileDescriptor.m_Multiples = multiplesUintData;

    // The input and output must have the same number of dimensions
    if (inputTensorInfo.GetNumDimensions() != outputTensorInfo.GetNumDimensions())
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: The input and output tensors must have the same number of dimensions "
                "in operator #%d node #%d: ",
                tileOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // No network pointer indicates that only support for this operator should be checked
    if (!delegateData.m_Network)
    {
        return ValidateTileOperator(delegateData,
                                    tfLiteContext,
                                    inputTensorInfo,
                                    outputTensorInfo,
                                    tileDescriptor);
    }

    auto layerName = GetName(armnn::LayerType::Tile, nodeIndex);
    armnn::IConnectableLayer* layer = delegateData.m_Network->AddTileLayer(tileDescriptor, layerName.c_str());

    if (layer == nullptr)
    {
        return kTfLiteError;
    }

    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate