IVGCVSW-5648 Adding serializer support for m_DimensionsSpecificity

The field m_DimensionsSpecificity in TensorShape was not being serialized
or deserialized following the implementation of type 1 dynamic tensors.

* Update schema.
* Add to Serializer and Deserializer.

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I7ddbdaf54c8f4b988c6cb300f90ba848a94bdad0
diff --git a/src/armnnDeserializer/Deserializer.cpp b/src/armnnDeserializer/Deserializer.cpp
index cbc4758..9b4cbe9 100644
--- a/src/armnnDeserializer/Deserializer.cpp
+++ b/src/armnnDeserializer/Deserializer.cpp
@@ -608,45 +608,63 @@
         }
     }
 
+    float quantizationScale = tensorPtr->quantizationScale();
+    int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
     if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::Scalar))
     {
-        float quantizationScale = tensorPtr->quantizationScale();
-        int32_t quantizationOffset = tensorPtr->quantizationOffset();
-
-        return armnn::TensorInfo(armnn::TensorShape{armnn::Dimensionality::Scalar},
+        return armnn::TensorInfo(TensorShape{armnn::Dimensionality::Scalar},
                                  type,
                                  quantizationScale,
                                  quantizationOffset);
     }
+    else if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::NotSpecified))
+    {
+        armnn::TensorInfo result(TensorShape{Dimensionality::NotSpecified},
+                                 type,
+                                 quantizationScale,
+                                 quantizationOffset);
+        return result;
+    }
 
     auto dimensions = tensorPtr->dimensions();
     unsigned int size = dimensions->size();
     std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+    bool dimensionsSpecificity[armnn::MaxNumOfTensorDimensions];
+    std::fill_n(dimensionsSpecificity, armnn::MaxNumOfTensorDimensions, true);
+    // For backwards compatibility, check if the dimensionSpecificity vector is present first.
+    // The default is to have dimensionSpecificity set to all true anyway.
+    if (tensorPtr->dimensionSpecificity() != nullptr)
+    {
+        auto dimensionSpecificity = tensorPtr->dimensionSpecificity();
+        size = dimensionSpecificity->size();
+        for (unsigned int i = 0; i < size; ++i)
+        {
+            dimensionsSpecificity[i] = dimensionSpecificity->Get(i);
+        }
+    }
+    // Construct a TensorShape
+    TensorShape shape(size, outputDims.data(), dimensionsSpecificity);
 
     auto quantizationScales = tensorPtr->quantizationScales();
-
     if (quantizationScales)
     {
         unsigned int quantizationScalesSize = quantizationScales->size();
         std::vector<float> scales(quantizationScales->begin(), quantizationScales->begin() + quantizationScalesSize);
         unsigned int quantizationDim = tensorPtr->quantizationDim();
-        armnn::TensorInfo result(size,
-                                 outputDims.data(),
+        armnn::TensorInfo result(shape,
                                  type,
                                  scales,
                                  quantizationDim);
         return result;
     }
 
-    float quantizationScale = tensorPtr->quantizationScale();
-    int32_t quantizationOffset = tensorPtr->quantizationOffset();
-
     // two statements (on purpose) for easier debugging:
-    armnn::TensorInfo result(size,
-                             outputDims.data(),
+    armnn::TensorInfo result(shape,
                              type,
                              quantizationScale,
                              quantizationOffset);
+    
     return result;
 }
 
diff --git a/src/armnnSerializer/ArmnnSchema.fbs b/src/armnnSerializer/ArmnnSchema.fbs
index 9dbf6aa..e2b3a3c 100644
--- a/src/armnnSerializer/ArmnnSchema.fbs
+++ b/src/armnnSerializer/ArmnnSchema.fbs
@@ -67,6 +67,7 @@
     quantizationScales:[float];
     quantizationDim:uint;
     dimensionality:uint = 1;
+    dimensionSpecificity:[bool];
 }
 
 struct Connection {
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index cb9e686..524ffb0 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 // automatically generated by the FlatBuffers compiler, do not modify
@@ -1633,7 +1633,8 @@
     VT_QUANTIZATIONOFFSET = 10,
     VT_QUANTIZATIONSCALES = 12,
     VT_QUANTIZATIONDIM = 14,
-    VT_DIMENSIONALITY = 16
+    VT_DIMENSIONALITY = 16,
+    VT_DIMENSIONSPECIFICITY = 18
   };
   const flatbuffers::Vector<uint32_t> *dimensions() const {
     return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DIMENSIONS);
@@ -1656,6 +1657,9 @@
   uint32_t dimensionality() const {
     return GetField<uint32_t>(VT_DIMENSIONALITY, 1);
   }
+  const flatbuffers::Vector<uint8_t> *dimensionSpecificity() const {
+    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DIMENSIONSPECIFICITY);
+  }
   bool Verify(flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_DIMENSIONS) &&
@@ -1667,6 +1671,8 @@
            verifier.VerifyVector(quantizationScales()) &&
            VerifyField<uint32_t>(verifier, VT_QUANTIZATIONDIM) &&
            VerifyField<uint32_t>(verifier, VT_DIMENSIONALITY) &&
+           VerifyOffset(verifier, VT_DIMENSIONSPECIFICITY) &&
+           verifier.VerifyVector(dimensionSpecificity()) &&
            verifier.EndTable();
   }
 };
@@ -1696,6 +1702,9 @@
   void add_dimensionality(uint32_t dimensionality) {
     fbb_.AddElement<uint32_t>(TensorInfo::VT_DIMENSIONALITY, dimensionality, 1);
   }
+  void add_dimensionSpecificity(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity) {
+    fbb_.AddOffset(TensorInfo::VT_DIMENSIONSPECIFICITY, dimensionSpecificity);
+  }
   explicit TensorInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
@@ -1716,8 +1725,10 @@
     int32_t quantizationOffset = 0,
     flatbuffers::Offset<flatbuffers::Vector<float>> quantizationScales = 0,
     uint32_t quantizationDim = 0,
-    uint32_t dimensionality = 1) {
+    uint32_t dimensionality = 1,
+    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity = 0) {
   TensorInfoBuilder builder_(_fbb);
+  builder_.add_dimensionSpecificity(dimensionSpecificity);
   builder_.add_dimensionality(dimensionality);
   builder_.add_quantizationDim(quantizationDim);
   builder_.add_quantizationScales(quantizationScales);
@@ -1736,9 +1747,11 @@
     int32_t quantizationOffset = 0,
     const std::vector<float> *quantizationScales = nullptr,
     uint32_t quantizationDim = 0,
-    uint32_t dimensionality = 1) {
+    uint32_t dimensionality = 1,
+    const std::vector<uint8_t> *dimensionSpecificity = nullptr) {
   auto dimensions__ = dimensions ? _fbb.CreateVector<uint32_t>(*dimensions) : 0;
   auto quantizationScales__ = quantizationScales ? _fbb.CreateVector<float>(*quantizationScales) : 0;
+  auto dimensionSpecificity__ = dimensionSpecificity ? _fbb.CreateVector<uint8_t>(*dimensionSpecificity) : 0;
   return armnnSerializer::CreateTensorInfo(
       _fbb,
       dimensions__,
@@ -1747,7 +1760,8 @@
       quantizationOffset,
       quantizationScales__,
       quantizationDim,
-      dimensionality);
+      dimensionality,
+      dimensionSpecificity__);
 }
 
 struct ByteData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index bcdaa08..0586700 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -1668,6 +1668,14 @@
         shape.push_back(tensorInfo.GetShape()[dim]);
     }
 
+    std::vector<bool> specificity;
+    // This assumes that the TensorShape constructors have ensured that the size of m_DimensionsSpecificity
+    // matches the size of dimensions.
+    for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
+    {
+        specificity.push_back(tensorInfo.GetShape().GetDimensionSpecificity(dim));
+    }
+
     if (tensorInfo.HasPerAxisQuantization())
     {
         // Create FlatBuffer TensorInfo
@@ -1680,7 +1688,8 @@
                                          m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
                                          tensorInfo.GetQuantizationDim().value(),
                                          static_cast<unsigned int>
-                                         (tensorInfo.GetShape().GetDimensionality()));
+                                         (tensorInfo.GetShape().GetDimensionality()),
+                                         m_flatBufferBuilder.CreateVector(specificity));
         return flatBufferTensorInfo;
     }
 
@@ -1693,7 +1702,8 @@
                                                              0,
                                                              0,
                                                              static_cast<unsigned int>
-                                                             (tensorInfo.GetShape().GetDimensionality()));
+                                                             (tensorInfo.GetShape().GetDimensionality()),
+                                                             m_flatBufferBuilder.CreateVector(specificity));
     return flatBufferTensorInfo;
 }