//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
Francis Murtagh36d94ef2023-04-28 14:05:43 +01005#pragma once
6
7#include <OpaqueDelegateUtils.hpp>
8
9namespace armnnOpaqueDelegate
10{
11
12TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
13 TfLiteOpaqueContext* tfLiteContext,
14 TfLiteOpaqueNode* tfLiteNode,
15 int nodeIndex,
16 int32_t operatorCode)
17{
18 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
19 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
20
21 // Gather input indices and use to get input tensor.
22 const int* inputTensors;
23 auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
24 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
25 {
26 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
27 tfLiteContext,
28 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
29 nodeIndex);
30 return kTfLiteError;
31 }
32
33 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
34 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
35 {
36 return kTfLiteError;
37 }
38
39 // Gather output indices and use to get output tensors.
40 int numOutputs = 0;
41 const int* outputTensors;
42 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
43 {
44 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
45 tfLiteContext,
46 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
47 nodeIndex);
48 return kTfLiteError;
49 }
50
51 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
52 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
53 {
54 return kTfLiteError;
55 }
56
57 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
58 armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
59
60 UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
61
62 bool isSupported = false;
63 armnn::BackendId setBackend;
64 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
65 {
66 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEQUANTIZE",
67 tfLiteContext,
68 IsDequantizeSupported,
69 delegateData.m_Backends,
70 isSupported,
71 setBackend,
72 inputTensorInfo,
73 outputTensorInfo);
74 };
75
76 if (!delegateData.m_Network)
77 {
78 validateFunc(outputTensorInfo, isSupported);
79 return isSupported ? kTfLiteOk : kTfLiteError;
80 }
81
Mike Kellya2806502023-08-03 10:42:11 +010082 auto layerName = GetName(armnn::LayerType::Dequantize, nodeIndex);
83 armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer(layerName.c_str());
Francis Murtagh36d94ef2023-04-28 14:05:43 +010084 dequantizeLayer->SetBackendId(setBackend);
85 ARMNN_ASSERT(dequantizeLayer != nullptr);
86
87 armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
88 outputSlot.SetTensorInfo(outputTensorInfo);
89
90 auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
91 delegateData,
92 tfLiteContext,
Mike Kellya2806502023-08-03 10:42:11 +010093 tfLiteNode,
94 nodeIndex);
Francis Murtagh36d94ef2023-04-28 14:05:43 +010095 if (inputsTensorsProcess == kTfLiteError)
96 {
97 return inputsTensorsProcess;
98 }
99
100 return Connect(dequantizeLayer, tfLiteContext, tfLiteNode, delegateData);
101}
102
103TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
104 TfLiteOpaqueContext* tfLiteContext,
105 TfLiteOpaqueNode* tfLiteNode,
106 int nodeIndex,
107 int32_t operatorCode)
108{
109 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
110 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
111
112 // Gather input indices and use to get input tensor.
113 const int* inputTensors;
114 auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
115 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
116 {
117 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
118 tfLiteContext,
119 "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
120 nodeIndex);
121 return kTfLiteError;
122 }
123
124 const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
125 if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
126 {
127 return kTfLiteError;
128 }
129
130 // Gather output indices and use to get output tensors.
131 int numOutputs = 0;
132 const int* outputTensors;
133 if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
134 {
135 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
136 tfLiteContext,
137 "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
138 nodeIndex);
139 return kTfLiteError;
140 }
141
142 const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
143 if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
144 {
145 return kTfLiteError;
146 }
147
148 // Only affine per-layer quantization is supported.
149 if (!IsAffineQuantization(*tfLiteOutputTensor))
150 {
151 TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
152 tfLiteContext,
153 "TfLiteArmnnOpaqueDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
154 operatorCode, nodeIndex);
155 return kTfLiteError;
156 }
157
158 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
159 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
160
161 bool isSupported = false;
162 armnn::BackendId setBackend;
163 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
164 {
165 FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("QUANTIZE",
166 tfLiteContext,
167 IsQuantizeSupported,
168 delegateData.m_Backends,
169 isSupported,
170 setBackend,
171 inputTensorInfo,
172 outputTensorInfo);
173 };
174
175 if (!delegateData.m_Network)
176 {
177 validateFunc(outputTensorInfo, isSupported);
178 return isSupported ? kTfLiteOk : kTfLiteError;
179 }
180
Mike Kellya2806502023-08-03 10:42:11 +0100181 auto layerName = GetName(armnn::LayerType::Quantize, nodeIndex);
182 armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer(layerName.c_str());
Francis Murtagh36d94ef2023-04-28 14:05:43 +0100183 quantizeLayer->SetBackendId(setBackend);
184 ARMNN_ASSERT(quantizeLayer != nullptr);
185
186 armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
187 outputSlot.SetTensorInfo(outputTensorInfo);
188
189 // try to connect the Constant Inputs if there are any
Mike Kellya2806502023-08-03 10:42:11 +0100190 if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
Francis Murtagh36d94ef2023-04-28 14:05:43 +0100191 {
192 return kTfLiteError;
193 }
194
195 return Connect(quantizeLayer, tfLiteContext, tfLiteNode, delegateData);
196}
197
198} // namespace armnnOpaqueDelegate