//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{

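// Validates a TfLite DEQUANTIZE node and, when a network is being built, adds the
// equivalent ArmNN Dequantize layer and connects it to its inputs and outputs.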
TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t tfLiteDequantizeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        // If this is a Dequantize with a Constant input then it will be replaced by a Constant layer containing the
        // dequantized values during optimization, so there is no need to check whether the backend supports it.
        if (tflite::IsConstantTensor(&tfLiteInputTensor))
        {
            isSupported = true;
        }
        else
        {
            FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
                                       tfLiteContext,
                                       IsDequantizeSupported,
                                       delegateData.m_Backends,
                                       isSupported,
                                       setBackend,
                                       inputTensorInfo,
                                       outputTensorInfo);
        }
    };

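    // No network is being built: this call is only a support check, so run the validation and return its result.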
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetLayerName(armnn::LayerType::Dequantize, nodeIndex);
    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer(layerName.c_str());
    ARMNN_ASSERT(dequantizeLayer != nullptr);
    dequantizeLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

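    // Add any constant input tensors to the network as Constant layers and connect them to this layer.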
    auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
                                              delegateData,
                                              tfLiteContext,
                                              tfLiteNode,
                                              nodeIndex);
    if (inputsTensorsProcess == kTfLiteError)
    {
        return inputsTensorsProcess;
    }

    return Connect(dequantizeLayer, tfLiteNode, delegateData);
}

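// Validates a TfLite QUANTIZE node and, when a network is being built, adds the
// equivalent ArmNN Quantize layer and connects it to its inputs and outputs.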
TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t tfLiteQuantizeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Only affine per-layer quantization is supported.
    if (!IsAffineQuantization(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
                                   tfLiteContext,
                                   IsQuantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };

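    // Support check only: no network is being built, so just report whether the layer is supported.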
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    auto layerName = GetLayerName(armnn::LayerType::Quantize, nodeIndex);
    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer(layerName.c_str());
    ARMNN_ASSERT(quantizeLayer != nullptr);
    quantizeLayer->SetBackendId(setBackend);

    armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    // Try to connect the constant inputs, if there are any.
    if (ProcessInputs(quantizeLayer, delegateData, tfLiteContext, tfLiteNode, nodeIndex) != kTfLiteOk)
    {
        return kTfLiteError;
    }

    return Connect(quantizeLayer, tfLiteNode, delegateData);
}

} // namespace armnnDelegate