//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{
Sadik Armagan62483be2020-10-23 17:14:43 +010018TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
19 TfLiteContext* tfLiteContext,
20 TfLiteNode* tfLiteNode,
21 int nodeIndex,
Matthew Sloyan0d35a932020-11-09 12:25:05 +000022 int32_t tfLiteDequantizeOperatorCode)
Sadik Armagan62483be2020-10-23 17:14:43 +010023{
Matthew Sloyan0d35a932020-11-09 12:25:05 +000024 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
25 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
26
27 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
28 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
29 if (IsDynamicTensor(tfLiteInputTensor))
30 {
31 TF_LITE_MAYBE_KERNEL_LOG(
32 tfLiteContext,
33 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
34 tfLiteDequantizeOperatorCode, nodeIndex);
35 return kTfLiteError;
36 }
37
38 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
39 if (IsDynamicTensor(tfLiteOutputTensor))
40 {
41 TF_LITE_MAYBE_KERNEL_LOG(
42 tfLiteContext,
43 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
44 tfLiteDequantizeOperatorCode, nodeIndex);
Finn Williams6f9f9902020-11-13 13:23:15 +000045
Matthew Sloyan0d35a932020-11-09 12:25:05 +000046 return kTfLiteError;
47 }
Matthew Sloyan0d35a932020-11-09 12:25:05 +000048 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +010049 armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
50
51 UpdateConstantTensorOutputs(inputTensorInfo, outputTensorInfo);
Matthew Sloyan0d35a932020-11-09 12:25:05 +000052
53 bool isSupported = false;
54 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
55 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +000056 FORWARD_LAYER_SUPPORT_FUNC("DEQUANTIZE",
Matthew Sloyan0d35a932020-11-09 12:25:05 +000057 tfLiteContext,
58 IsDequantizeSupported,
59 delegateData.m_Backends,
60 isSupported,
61 inputTensorInfo,
62 outputTensorInfo);
63 };
64
65 if (!delegateData.m_Network)
66 {
67 validateFunc(outputTensorInfo, isSupported);
68 return isSupported ? kTfLiteOk : kTfLiteError;
69 }
70
71 armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
72 ARMNN_ASSERT(dequantizeLayer != nullptr);
73
74 armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
75 outputSlot.SetTensorInfo(outputTensorInfo);
76
Sadik Armaganf7ac72c2021-05-05 15:03:50 +010077 auto inputsTensorsProcess = ProcessInputs(dequantizeLayer,
78 delegateData,
79 tfLiteContext,
80 tfLiteNode);
81 if (inputsTensorsProcess == kTfLiteError)
82 {
83 return inputsTensorsProcess;
84 }
85
Matthew Sloyan0d35a932020-11-09 12:25:05 +000086 return Connect(dequantizeLayer, tfLiteNode, delegateData);
87}
88
89TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
90 TfLiteContext* tfLiteContext,
91 TfLiteNode* tfLiteNode,
92 int nodeIndex,
93 int32_t tfLiteQuantizeOperatorCode)
94{
95 TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
96 TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
97
98 const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
99 const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
100 if (IsDynamicTensor(tfLiteInputTensor))
101 {
102 TF_LITE_MAYBE_KERNEL_LOG(
103 tfLiteContext,
104 "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
105 tfLiteQuantizeOperatorCode, nodeIndex);
106 return kTfLiteError;
107 }
108
109 const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
110 if (IsDynamicTensor(tfLiteOutputTensor))
111 {
112 TF_LITE_MAYBE_KERNEL_LOG(
113 tfLiteContext,
114 "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
115 tfLiteQuantizeOperatorCode, nodeIndex);
116 return kTfLiteError;
117 }
118
119 // Only affine per-layer quantization is supported.
120 if (!IsAffineQuantization(tfLiteOutputTensor))
121 {
122 TF_LITE_MAYBE_KERNEL_LOG(
123 tfLiteContext,
124 "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
125 tfLiteQuantizeOperatorCode, nodeIndex);
126 return kTfLiteError;
127 }
128
129 const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
Sadik Armagan90a119b2022-08-05 16:12:49 +0100130 const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000131
132 bool isSupported = false;
133 auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
134 {
Sadik Armaganbfa767c2022-02-09 14:58:03 +0000135 FORWARD_LAYER_SUPPORT_FUNC("QUANTIZE",
Matthew Sloyan0d35a932020-11-09 12:25:05 +0000136 tfLiteContext,
137 IsQuantizeSupported,
138 delegateData.m_Backends,
139 isSupported,
140 inputTensorInfo,
141 outputTensorInfo);
142 };
143
144 if (!delegateData.m_Network)
145 {
146 validateFunc(outputTensorInfo, isSupported);
147 return isSupported ? kTfLiteOk : kTfLiteError;
148 }
149
150 armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
151 ARMNN_ASSERT(quantizeLayer != nullptr);
152
153 armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
154 outputSlot.SetTensorInfo(outputTensorInfo);
155
156 return Connect(quantizeLayer, tfLiteNode, delegateData);
Sadik Armagan62483be2020-10-23 17:14:43 +0100157}

} // namespace armnnDelegate