IVGCVSW-4555 ArmnnConverter (Serializer) does not support per-axis quantization params
* TensorInfo can have multiple quantization scales and a quantization dimension.
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I0ff02e3766996b6a9da6dc4e92d366bc9505c77d
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index bc6fbf0..58232a2 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -508,6 +508,9 @@
case DataType_QAsymmS8:
type = armnn::DataType::QAsymmS8;
break;
+ case DataType_QSymmS8:
+ type = armnn::DataType::QSymmS8;
+ break;
case DataType_QuantisedAsymm8:
case DataType_QAsymmU8:
type = armnn::DataType::QAsymmU8;
@@ -539,13 +542,30 @@
location.AsString()));
}
}
- float quantizationScale = tensorPtr->quantizationScale();
- int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
auto dimensions = tensorPtr->dimensions();
unsigned int size = dimensions->size();
std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+ auto quantizationScales = tensorPtr->quantizationScales();
+
+ if (quantizationScales)
+ {
+ unsigned int quantizationScalesSize = quantizationScales->size();
+ std::vector<float> scales(quantizationScales->begin(), quantizationScales->begin() + quantizationScalesSize);
+ unsigned int quantizationDim = tensorPtr->quantizationDim();
+ armnn::TensorInfo result(size,
+ outputDims.data(),
+ type,
+ scales,
+ quantizationDim);
+ return result;
+ }
+
+ float quantizationScale = tensorPtr->quantizationScale();
+ int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
// two statements (on purpose) for easier debugging:
armnn::TensorInfo result(size,
outputDims.data(),