//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/utility/IgnoreUnused.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/minimal_logging.h>

namespace armnnDelegate
{

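// Handles a TfLite DEQUANTIZE node: validates that it has exactly one input and
// one output, queries the backends for support and, when a network is being
// built, adds the corresponding Arm NN Dequantize layer and connects it up.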
TfLiteStatus VisitDequantizeOperator(DelegateData& delegateData,
                                     TfLiteContext* tfLiteContext,
                                     TfLiteNode* tfLiteNode,
                                     int nodeIndex,
                                     int32_t tfLiteDequantizeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

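    // Dynamic tensors are rejected up front: the Arm NN network needs fully
    // specified shapes, so a node with a dynamic input or output is not delegated.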
    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteDequantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

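    // Ask the configured backends whether they support Dequantize with these
    // tensor infos; FORWARD_LAYER_SUPPORT_FUNC writes the answer into isSupported.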
    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsDequantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };

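    // With no network to build, this call is only the support check that decides
    // whether the node gets delegated.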
    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

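    // Network construction pass: add the Dequantize layer, describe its output
    // tensor, and wire the node's inputs and outputs into the Arm NN graph.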
    armnn::IConnectableLayer* dequantizeLayer = delegateData.m_Network->AddDequantizeLayer();
    ARMNN_ASSERT(dequantizeLayer != nullptr);

    armnn::IOutputSlot& outputSlot = dequantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(dequantizeLayer, tfLiteNode, delegateData);
}

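// Handles a TfLite QUANTIZE node. It follows the same validate-then-build pattern
// as VisitDequantizeOperator above, with one extra constraint: the output tensor
// must use affine per-layer quantization.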
TfLiteStatus VisitQuantizeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
                                   int nodeIndex,
                                   int32_t tfLiteQuantizeOperatorCode)
{
    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));

    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
    if (IsDynamicTensor(tfLiteInputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic input tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
    if (IsDynamicTensor(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Dynamic output tensors are not supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    // Only affine per-layer quantization is supported.
    if (!IsAffineQuantization(tfLiteOutputTensor))
    {
        TF_LITE_MAYBE_KERNEL_LOG(
            tfLiteContext,
            "TfLiteArmnnDelegate: Only affine per-layer quantization is supported in operator #%d node #%d: ",
            tfLiteQuantizeOperatorCode, nodeIndex);
        return kTfLiteError;
    }

    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);

    bool isSupported = false;
    auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   tfLiteContext,
                                   IsQuantizeSupported,
                                   delegateData.m_Backends,
                                   isSupported,
                                   inputTensorInfo,
                                   outputTensorInfo);
    };

    if (!delegateData.m_Network)
    {
        validateFunc(outputTensorInfo, isSupported);
        return isSupported ? kTfLiteOk : kTfLiteError;
    }

    armnn::IConnectableLayer* quantizeLayer = delegateData.m_Network->AddQuantizeLayer();
    ARMNN_ASSERT(quantizeLayer != nullptr);

    armnn::IOutputSlot& outputSlot = quantizeLayer->GetOutputSlot(0);
    outputSlot.SetTensorInfo(outputTensorInfo);

    return Connect(quantizeLayer, tfLiteNode, delegateData);
}

} // namespace armnnDelegate
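
// A minimal sketch of how these functions are typically reached. The real dispatch
// lives in the delegate's operator switch (armnn_delegate.cpp); the fragment below
// is illustrative only, not a copy of that source:
//
//     case kTfLiteBuiltinDequantize:
//         return VisitDequantizeOperator(delegateData, tfLiteContext, tfLiteNode,
//                                        nodeIndex, kTfLiteBuiltinDequantize);
//     case kTfLiteBuiltinQuantize:
//         return VisitQuantizeOperator(delegateData, tfLiteContext, tfLiteNode,
//                                      nodeIndex, kTfLiteBuiltinQuantize);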