Rename quantized data types to remove signed/unsigned payload ambiguity

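The old enum names QuantisedAsymm8 and QuantisedSymm16 say nothing about
whether the stored payload is signed or unsigned. They are superseded by
QAsymmU8 (asymmetric, unsigned 8-bit) and QSymmS16 (symmetric, signed
16-bit). In the serializer schema the old enum values are retained and
marked deprecated so that previously serialized models still map to valid
values.

A minimal sketch of the renamed types in use (not part of this patch;
the shapes, scales and offsets below are illustrative only):

    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>

    void QuantizedTensorInfoExample()
    {
        // QAsymmU8 replaces QuantisedAsymm8: unsigned 8-bit, asymmetric quantization.
        armnn::TensorInfo asymmU8Info({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);

        // QSymmS16 replaces QuantisedSymm16: signed 16-bit, symmetric quantization (zero offset).
        armnn::TensorInfo symmS16Info({ 2, 4 }, armnn::DataType::QSymmS16, 0.25f, 0);
    }
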
!android-nn-driver:2572

Change-Id: I8fe52ceb09987b3d05c539409510f535165455cc
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index bad95cf..0d30d96 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -30,10 +30,12 @@
 enum DataType : byte {
     Float16 = 0,
     Float32 = 1,
-    QuantisedAsymm8 = 2,
+    QuantisedAsymm8 = 2, // deprecated
     Signed32 = 3,
     Boolean = 4,
-    QuantisedSymm16 = 5
+    QuantisedSymm16 = 5, // deprecated
+    QAsymmU8 = 6,
+    QSymmS16 = 7
 }
 
 enum DataLayout : byte {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index 608a9c3..be6fa64 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1405,7 +1405,7 @@
             fbPayload = flatBuffersData.o;
             break;
         }
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
         {
             auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
             flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
@@ -1414,7 +1414,7 @@
             fbPayload = flatBuffersData.o;
             break;
         }
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         case armnn::DataType::Boolean:
         default:
         {
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 908da64..df1ef28 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -36,9 +36,9 @@
         case armnn::DataType::Signed32:
             return armnnSerializer::ConstTensorData::ConstTensorData_IntData;
         case armnn::DataType::Float16:
-        case armnn::DataType::QuantisedSymm16:
+        case armnn::DataType::QSymmS16:
             return armnnSerializer::ConstTensorData::ConstTensorData_ShortData;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         case armnn::DataType::Boolean:
             return armnnSerializer::ConstTensorData::ConstTensorData_ByteData;
         default:
@@ -56,10 +56,10 @@
             return armnnSerializer::DataType::DataType_Float16;
         case armnn::DataType::Signed32:
             return armnnSerializer::DataType::DataType_Signed32;
-        case armnn::DataType::QuantisedSymm16:
-            return armnnSerializer::DataType::DataType_QuantisedSymm16;
-        case armnn::DataType::QuantisedAsymm8:
-            return armnnSerializer::DataType::DataType_QuantisedAsymm8;
+        case armnn::DataType::QSymmS16:
+            return armnnSerializer::DataType::DataType_QSymmS16;
+        case armnn::DataType::QAsymmU8:
+            return armnnSerializer::DataType::DataType_QAsymmU8;
         case armnn::DataType::Boolean:
             return armnnSerializer::DataType::DataType_Boolean;
         default:
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 8dfca3c..3e67cf0 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -206,7 +206,7 @@
             CompareConstTensorData<const float*>(
                 tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
             break;
-        case armnn::DataType::QuantisedAsymm8:
+        case armnn::DataType::QAsymmU8:
         case armnn::DataType::Boolean:
             CompareConstTensorData<const uint8_t*>(
                 tensor1.GetMemoryArea(), tensor2.GetMemoryArea(), tensor1.GetNumElements());
@@ -770,7 +770,7 @@
     DECLARE_LAYER_VERIFIER_CLASS(Dequantize)
 
     const std::string layerName("dequantize");
-    const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QuantisedAsymm8, 0.5f, 1);
+    const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
     const armnn::TensorInfo outputInfo({ 1, 5, 2, 3 }, armnn::DataType::Float32);
 
     armnn::INetworkPtr network = armnn::INetwork::Create();
@@ -1152,8 +1152,8 @@
     };
 
     const std::string layerName("gather");
-    armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo paramsInfo({ 8 }, armnn::DataType::QAsymmU8);
+    armnn::TensorInfo outputInfo({ 3 }, armnn::DataType::QAsymmU8);
     const armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
 
     paramsInfo.SetQuantizationScale(1.0f);
@@ -3994,7 +3994,7 @@
     armnn::TensorShape inputToInputWeightsShape = {4, 2};
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
-                                              armnn::DataType::QuantisedAsymm8,
+                                              armnn::DataType::QAsymmU8,
                                               weightsScale,
                                               weightsOffset);
     armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
@@ -4002,7 +4002,7 @@
     armnn::TensorShape inputToForgetWeightsShape = {4, 2};
     std::vector<uint8_t> inputToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
-                                               armnn::DataType::QuantisedAsymm8,
+                                               armnn::DataType::QAsymmU8,
                                                weightsScale,
                                                weightsOffset);
     armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
@@ -4010,7 +4010,7 @@
     armnn::TensorShape inputToCellWeightsShape = {4, 2};
     std::vector<uint8_t> inputToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
-                                             armnn::DataType::QuantisedAsymm8,
+                                             armnn::DataType::QAsymmU8,
                                              weightsScale,
                                              weightsOffset);
     armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
@@ -4018,7 +4018,7 @@
     armnn::TensorShape inputToOutputWeightsShape = {4, 2};
     std::vector<uint8_t> inputToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8};
     armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
-                                               armnn::DataType::QuantisedAsymm8,
+                                               armnn::DataType::QAsymmU8,
                                                weightsScale,
                                                weightsOffset);
     armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
@@ -4027,7 +4027,7 @@
     armnn::TensorShape recurrentToInputWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
-                                                  armnn::DataType::QuantisedAsymm8,
+                                                  armnn::DataType::QAsymmU8,
                                                   weightsScale,
                                                   weightsOffset);
     armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
@@ -4035,7 +4035,7 @@
     armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToForgetWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
-                                                   armnn::DataType::QuantisedAsymm8,
+                                                   armnn::DataType::QAsymmU8,
                                                    weightsScale,
                                                    weightsOffset);
     armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
@@ -4043,7 +4043,7 @@
     armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToCellWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
-                                                 armnn::DataType::QuantisedAsymm8,
+                                                 armnn::DataType::QAsymmU8,
                                                  weightsScale,
                                                  weightsOffset);
     armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
@@ -4051,7 +4051,7 @@
     armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
     std::vector<uint8_t> recurrentToOutputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
     armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
-                                                   armnn::DataType::QuantisedAsymm8,
+                                                   armnn::DataType::QAsymmU8,
                                                    weightsScale,
                                                    weightsOffset);
     armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
@@ -4114,15 +4114,15 @@
 
     // Connect up
     armnn::TensorInfo inputTensorInfo({ batchSize, inputSize },
-                                      armnn::DataType::QuantisedAsymm8,
+                                      armnn::DataType::QAsymmU8,
                                       inputOutputScale,
                                       inputOutputOffset);
     armnn::TensorInfo cellStateTensorInfo({ batchSize, numUnits },
-                                          armnn::DataType::QuantisedSymm16,
+                                          armnn::DataType::QSymmS16,
                                           cellStateScale,
                                           cellStateOffset);
     armnn::TensorInfo outputStateTensorInfo({ batchSize, outputSize },
-                                            armnn::DataType::QuantisedAsymm8,
+                                            armnn::DataType::QAsymmU8,
                                             inputOutputScale,
                                             inputOutputOffset);