//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

6#pragma once
7
Finn Williams6f9f9902020-11-13 13:23:15 +00008#include <armnn/utility/IgnoreUnused.hpp>
9
Sadik Armagan62483be2020-10-23 17:14:43 +010010#include <tensorflow/lite/builtin_ops.h>
11#include <tensorflow/lite/c/builtin_op_data.h>
12#include <tensorflow/lite/c/common.h>
13#include <tensorflow/lite/minimal_logging.h>
14
15namespace armnnDelegate
16{
17
Sadik Armagan62483be2020-10-23 17:14:43 +010018TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
19 TfLiteContext* tfLiteContext,
20 TfLiteNode* tfLiteNode,
21 int nodeIndex,
Matthew Sloyan0d35a932020-11-09 12:25:05 +000022 int32_t tfLiteDequantizeOperatorCode)
Sadik Armagan62483be2020-10-23 17:14:43 +010023{
Matthew Sloyan0d35a932020-11-09 12:25:05 +000024 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
25 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
26
27 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
28 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
29 if (IsDynamicTensor(tfLiteInputTensor))
30 {
31 TF_LITE_MAYBE_KERNEL_LOG(
32 tfLiteContext,
33 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
34 tfLiteDequantizeOperatorCode, nodeIndex);
35 return kTfLiteError;
36 }
37
38 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
39 if (IsDynamicTensor(tfLiteOutputTensor))
40 {
41 TF_LITE_MAYBE_KERNEL_LOG(
42 tfLiteContext,
43 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
44 tfLiteDequantizeOperatorCode, nodeIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +000045
Matthew Sloyan0d35a932020-11-09 12:25:05 +000046 return kTfLiteError;
47 }
Matthew Sloyan0d35a932020-11-09 12:25:05 +000048 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +010049 armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
50
51 UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
Matthew Sloyan0d35a932020-11-09 12:25:05 +000052
53 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +010054 armnn::BackendId setBackend;
Matthew Sloyan0d35a932020-11-09 12:25:05 +000055 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
56 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +000057 FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
Matthew Sloyan0d35a932020-11-09 12:25:05 +000058 tfLiteContext,
59 IsDequantizeSupported,
60 delegateData.m_Backends,
61 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +010062 setBackend,
Matthew Sloyan0d35a932020-11-09 12:25:05 +000063 inputTensorInfo,
64 outputTensorInfo);
65 };
66
67 if (!delegateData.m_Network)
68 {
69 validateFunc(outputTensorInfo, isSupported);
70 return isSupported ? kTfLiteOk : kTfLiteError;
71 }
72
73 armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
Cathal Corbett53837672022-09-01 11:34:37 +010074 dequantizeLayer->SetBackendId(setBackend);
Matthew Sloyan0d35a932020-11-09 12:25:05 +000075 ARMNN_ASSERT(dequantizeLayer != nullptr);
76
77 armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
78 outputSlot.SetTensorInfo(outputTensorInfo);
79
Sadik Armaganf7ac72c2021-05-05 15:03:50 +010080 auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
81 delegateData,
82 tfLiteContext,
83 tfLiteNode);
84 if (inputsTensorsProcess == kTfLiteError)
85 {
86 return inputsTensorsProcess;
87 }
88
Matthew Sloyan0d35a932020-11-09 12:25:05 +000089 return Connect(dequantizeLayer, tfLiteNode, delegateData);
90}
91
92TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
93 TfLiteContext* tfLiteContext,
94 TfLiteNode* tfLiteNode,
95 int nodeIndex,
96 int32_t tfLiteQuantizeOperatorCode)
97{
98 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
99 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
100
101 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
102 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
103 if (IsDynamicTensor(tfLiteInputTensor))
104 {
105 TF_LITE_MAYBE_KERNEL_LOG(
106 tfLiteContext,
107 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
108 tfLiteQuantizeOperatorCode, nodeIndex);
109 return kTfLiteError;
110 }
111
112 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
113 if (IsDynamicTensor(tfLiteOutputTensor))
114 {
115 TF_LITE_MAYBE_KERNEL_LOG(
116 tfLiteContext,
117 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
118 tfLiteQuantizeOperatorCode, nodeIndex);
119 return kTfLiteError;
120 }
121
122 // Only affine per-layer quantization is supported.
123 if (!IsAffineQuantization(tfLiteOutputTensor))
124 {
125 TF_LITE_MAYBE_KERNEL_LOG(
126 tfLiteContext,
127 "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
128 tfLiteQuantizeOperatorCode, nodeIndex);
129 return kTfLiteError;
130 }
131
132 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100133 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000134
135 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100136 armnn::BackendId setBackend;
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000137 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
138 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000139 FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000140 tfLiteContext,
141 IsQuantizeSupported,
142 delegateData.m_Backends,
143 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100144 setBackend,
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000145 inputTensorInfo,
146 outputTensorInfo);
147 };
148
149 if (!delegateData.m_Network)
150 {
151 validateFunc(outputTensorInfo, isSupported);
152 return isSupported ? kTfLiteOk : kTfLiteError;
153 }
154
155 armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
Cathal Corbett53837672022-09-01 11:34:37 +0100156 quantizeLayer->SetBackendId(setBackend);
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000157 ARMNN_ASSERT(quantizeLayer != nullptr);
158
159 armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
160 outputSlot.SetTensorInfo(outputTensorInfo);
161
Ryan OShea4c231de2023-01-17 15:19:20 +0000162 // try to connect the Constant Inputs if there are any
163 if(ProcessInputs(quantizeLayer,delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk )
164 {
165 return kTfLiteError;
166 }
167
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000168 return Connect(quantizeLayer, tfLiteNode, delegateData);
Sadik Armagan62483be2020-10-23 17:14:43 +0100169}
170
171} // namespace armnnDelegate