//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include "DelegateUtils.hpp"

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>
#include <numeric>

namespace armnnDelegate
{

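// Builds the armnn::ReshapeDescriptor for a Reshape node from the TfLite target shape.
// The target shape may contain a single -1 wildcard, which is resolved from the number of
// elements in the input tensor; more than one -1 is rejected with kTfLiteError.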
TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
                                     const std::vector<int32_t>& targetShape,
                                     armnn::ReshapeDescriptor& reshapeDesc)
{
    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);

    if (stretchDim != targetShape.end())
    {
        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
        {
            // More than one -1 wildcard in the target shape is invalid;
            // return kTfLiteError and let the caller log the error.
            return kTfLiteError;
        }

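        // std::accumulate starts from -1, which cancels out the single -1 wildcard in the
        // target shape, so targetNumElements is the product of the explicit dimensions.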
        auto targetNumElements =
            armnn::numeric_cast<unsigned int>(
                std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));

        auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
    }

    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
                                                        outputDims.data());
    reshapeDesc.m_TargetShape = outputShape;
    return kTfLiteOk;
}

TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    auto numInputs = tfLiteNode->inputs->size;

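    // A TfLite Reshape node supplies the target shape either as a second input tensor
    // or through its builtin options, so it arrives with either two inputs or one.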
    if (numInputs == 2)
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
    }
    else
    {
        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    }
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor0 = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor0))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in "
                                 "operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    armnn::ReshapeDescriptor reshapeDesc;

    // The new shape can be defined either by a second input tensor or by a builtin option, so check for both.
    if (numInputs == 2)
    {
        const TfLiteTensor& tfLiteShapeInputTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
        if (IsDynamicTensor(tfLiteShapeInputTensor))
        {
            TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                     "TfLiteArmnnDelegate: Dynamic input tensors are not supported in "
                                     "operator #%d node #%d: ",
                                     operatorCode, nodeIndex);
            return kTfLiteError;
        }

        // Get the shape data out of the input tensor.
        std::vector<int32_t> targetShape;
        auto* shapeTensorDataPtr = tflite::GetTensorData<int32_t>(&tfLiteShapeInputTensor);
        auto shapeTensorNumValues = tfLiteShapeInputTensor.dims->data[0];
        for (auto i = 0; i < shapeTensorNumValues; ++i)
        {
            targetShape.push_back(*(shapeTensorDataPtr + i));
        }

        // Use the data to create the required tensor shape.
        if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
        {
            TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                     "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
                                     "operator #%d node #%d: ",
                                     operatorCode, nodeIndex);
            return kTfLiteError;
        }
    }
    else if (tfLiteNode->builtin_data)
    {
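        // The target shape is supplied through the builtin TfLiteReshapeParams options.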
        std::vector<int32_t> targetShape;
        TfLiteReshapeParams* reshapeOptions =
            reinterpret_cast<TfLiteReshapeParams*>(tfLiteNode->builtin_data);
        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
        {
            targetShape.push_back(reshapeOptions->shape[i]);
        }
        if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
        {
            TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                     "TfLiteArmnnDelegate: At most one component of shape can be -1 in: "
                                     "operator #%d node #%d: ",
                                     operatorCode, nodeIndex);
            return kTfLiteError;
        }
    }
    else
    {
        TF_LITE_MAYBE_KERNEL_LOG(tfLiteContext,
                                 "Target shape not defined in reshape parameters or input tensor. "
                                 "At least one method required in operator #%d node #%d: ",
                                 operatorCode, nodeIndex);
        // Without a target shape there is nothing to reshape to, so reject the node.
        return kTfLiteError;
    }

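    // Check whether the selected backends can support a Reshape layer with this descriptor.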
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsReshapeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo0,
                                   outInfo,
                                   reshapeDesc);
    };

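    // If no network is being constructed, this call is only checking support for the node.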
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
    ARMNN_ASSERT(layer != nullptr);

    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Connect
    return Connect(layer, tfLiteNode, delegateData);
}

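// Squeeze is not yet implemented in the delegate; the node is rejected unconditionally.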
TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
                                  TfLiteContext* tfLiteContext,
                                  TfLiteNode* tfLiteNode,
                                  int nodeIndex,
                                  int32_t operatorCode)
{
    armnn::IgnoreUnused(delegateData,
                        tfLiteContext,
                        tfLiteNode,
                        nodeIndex,
                        operatorCode);

    return kTfLiteError;
}

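// ExpandDims is not yet implemented in the delegate; the node is rejected unconditionally.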
TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t operatorCode)
{
    armnn::IgnoreUnused(delegateData,
                        tfLiteContext,
                        tfLiteNode,
                        nodeIndex,
                        operatorCode);

    return kTfLiteError;
}

} // namespace armnnDelegate