//
// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <OpaqueDelegateUtils.hpp>

namespace armnnOpaqueDelegate
{

TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
                                     TfLiteOpaqueContext* tfLiteContext,
                                     TfLiteOpaqueNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

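    // Build the armnn::TensorInfo for each tensor; the trailing 'true' flags
    // the second tensor as an output.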
    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

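    // When the input is a constant tensor and the output shape is not yet
    // specified, this propagates the input shape to the output info
    // (behaviour assumed from the delegate utils helper of the same name).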
    UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);

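    // Query backend support for Dequantize with these tensor infos; the first
    // backend that reports support is recorded in setBackend.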
    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEQUANTIZE",
                                          tfLiteContext,
                                          IsDequantizeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo);
    };

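    // No network to build yet: the delegate is only being asked whether this
    // operator is supported, so run validation and return the result.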
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
    ARMNN_ASSERT(dequantizeLayer != nullptr);
    dequantizeLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

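    // Try to connect the constant inputs if there are any.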
    auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    return Connect(dequantizeLayer, tfLiteContext, tfLiteNode, delegateData);
}

TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
                                   TfLiteOpaqueContext* tfLiteContext,
                                   TfLiteOpaqueNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t operatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    // Gather input indices and use them to get the input tensor.
    const int* inputTensors;
    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Gather output indices and use them to get the output tensor.
    int numOutputs = 0;
    const int* outputTensors;
    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
                nodeIndex);
        return kTfLiteError;
    }

    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
    {
        return kTfLiteError;
    }

    // Only affine per-layer quantization is supported.
    if (!IsAffineQuantization(*tfLiteOutputTensor))
    {
        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
                tfLiteContext,
                "TfLiteArmnnOpaqueDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
                operatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("QUANTIZE",
                                          tfLiteContext,
                                          IsQuantizeSupported,
                                          delegateData.m_Backends,
                                          isSupported,
                                          setBackend,
                                          inputTensorInfo,
                                          outputTensorInfo);
    };

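    // As in VisitDequantizeOperator, a null network means only support is
    // being queried.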
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
    ARMNN_ASSERT(quantizeLayer != nullptr);
    quantizeLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs if there are any.
    if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(quantizeLayer, tfLiteContext, tfLiteNode, delegateData);
}

} // namespace armnnOpaqueDelegate
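
// For orientation, a minimal sketch of how these visitors are typically
// reached. The dispatch below is illustrative only: it assumes the delegate's
// node visitor has already read `builtinCode` from the node's registration,
// and the surrounding switch is hypothetical, not part of this header.
//
//     switch (builtinCode)
//     {
//         case kTfLiteBuiltinDequantize:
//             return VisitDequantizeOperator(delegateData, tfLiteContext,
//                                            tfLiteNode, nodeIndex, builtinCode);
//         case kTfLiteBuiltinQuantize:
//             return VisitQuantizeOperator(delegateData, tfLiteContext,
//                                          tfLiteNode, nodeIndex, builtinCode);
//         default:
//             return kTfLiteError;
//     }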