MLCE-1138 Fix Delegate backend support checks for FP16 models
* Fixed an issue where backends were asked to support FP16 layers that
  would be optimized out.
* Fixed an issue where backends were asked to support non-constant filter
  and bias tensors when those tensors would be replaced by constant
  tensors during optimization (a sketch of the constant-input check
  follows below).
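
The underlying pattern is to detect constant inputs before querying backend
support. As a minimal sketch (assuming IsConstantTensor is implemented via
the TF Lite C API's allocation type, where kTfLiteMmapRo marks read-only,
constant data):

    #include <tensorflow/lite/c/c_api_opaque.h>
    #include <tensorflow/lite/c/common.h>

    // A tensor whose data is memory-mapped read-only is constant: its values
    // are known at optimization time, so layers consuming it can be folded.
    bool IsConstantTensor(const TfLiteOpaqueTensor* tensor)
    {
        return tensor != nullptr &&
               TfLiteOpaqueTensorGetAllocationType(tensor) == kTfLiteMmapRo;
    }

With such a check in place, a Dequantize whose input is constant can be
reported as supported unconditionally, since the optimizer folds it into a
Constant layer holding the dequantized values (see the diff below).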
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ib54b9cb99d5014e27172841a665daf57d1d5b23d
diff --git a/delegate/opaque/src/Quantization.hpp b/delegate/opaque/src/Quantization.hpp
index d7f5c5c..e2e5f76 100644
--- a/delegate/opaque/src/Quantization.hpp
+++ b/delegate/opaque/src/Quantization.hpp
@@ -31,6 +31,7 @@
}
const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+
if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
{
return kTfLiteError;
@@ -63,14 +64,23 @@
armnn::BackendId setBackend;
auto validateFunc = [&](const armnn::TensorInfo& outputTensorInfo, bool& isSupported)
{
- FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEQUANTIZE",
- tfLiteContext,
- IsDequantizeSupported,
- delegateData.m_Backends,
- isSupported,
- setBackend,
- inputTensorInfo,
- outputTensorInfo);
+ // If this is a Dequantize with a Constant input, it will be replaced during optimization by a Constant
+ // layer containing the dequantized values, so there is no need to check whether the backend supports it.
+ if (IsConstantTensor(tfLiteInputTensor))
+ {
+ isSupported = true;
+ }
+ else
+ {
+ FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("DEQUANTIZE",
+ tfLiteContext,
+ IsDequantizeSupported,
+ delegateData.m_Backends,
+ isSupported,
+ setBackend,
+ inputTensorInfo,
+ outputTensorInfo);
+ }
};
if (!delegateData.m_Network)