IVGCVSW-5648 Adding serializer support for m_DimensionsSpecificity

The m_DimensionsSpecificity field in TensorShape was not being serialized
or deserialized following the implementation of type 1 dynamic tensors.

* Update the schema.
* Add the field to the Serializer and Deserializer (minimal usage sketches below).

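A minimal sketch of the write path, using only the generated API added in
this change (TensorInfoBuilder::add_dimensionSpecificity); the function name
SerializeShapeFlags and the example values are illustrative only and are not
part of the patch:

    #include <vector>
    #include <flatbuffers/flatbuffers.h>
    #include "ArmnnSchema_generated.h" // generated header updated by this patch

    // Build a TensorInfo table carrying the new per-dimension specificity
    // flags (1 = dimension size is specified, 0 = unspecified). The vector
    // must be created before the table builder is started.
    flatbuffers::Offset<armnnSerializer::TensorInfo> SerializeShapeFlags(
        flatbuffers::FlatBufferBuilder& fbb)
    {
        std::vector<uint8_t> specificity = {1, 0, 1, 1}; // e.g. dim 1 unspecified
        auto specificityVector = fbb.CreateVector<uint8_t>(specificity);

        armnnSerializer::TensorInfoBuilder builder(fbb);
        builder.add_dimensionality(1); // schema default: the rank itself is known
        builder.add_dimensionSpecificity(specificityVector);
        return builder.Finish();
    }
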
Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I7ddbdaf54c8f4b988c6cb300f90ba848a94bdad0
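
A matching sketch of the read path, using the dimensionSpecificity() accessor
added below; ReadShapeFlags is again an illustrative name, and mapping the
flags back onto armnn::TensorShape is omitted here:

    // Read the flags back out of a deserialized TensorInfo table. The field
    // is optional in the schema, so guard against older files that lack it.
    std::vector<bool> ReadShapeFlags(const armnnSerializer::TensorInfo* tensorInfoPtr)
    {
        std::vector<bool> dimensionsSpecificity;
        if (const flatbuffers::Vector<uint8_t>* flags = tensorInfoPtr->dimensionSpecificity())
        {
            for (uint8_t flag : *flags)
            {
                dimensionsSpecificity.push_back(flag != 0);
            }
        }
        return dimensionsSpecificity;
    }
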
diff --git a/src/armnnSerializer/ArmnnSchema_generated.h b/src/armnnSerializer/ArmnnSchema_generated.h
index cb9e686..524ffb0 100644
--- a/src/armnnSerializer/ArmnnSchema_generated.h
+++ b/src/armnnSerializer/ArmnnSchema_generated.h
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 // automatically generated by the FlatBuffers compiler, do not modify
@@ -1633,7 +1633,8 @@
     VT_QUANTIZATIONOFFSET = 10,
     VT_QUANTIZATIONSCALES = 12,
     VT_QUANTIZATIONDIM = 14,
-    VT_DIMENSIONALITY = 16
+    VT_DIMENSIONALITY = 16,
+    VT_DIMENSIONSPECIFICITY = 18
   };
   const flatbuffers::Vector<uint32_t> *dimensions() const {
     return GetPointer<const flatbuffers::Vector<uint32_t> *>(VT_DIMENSIONS);
@@ -1656,6 +1657,9 @@
   uint32_t dimensionality() const {
     return GetField<uint32_t>(VT_DIMENSIONALITY, 1);
   }
+  const flatbuffers::Vector<uint8_t> *dimensionSpecificity() const {
+    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_DIMENSIONSPECIFICITY);
+  }
   bool Verify(flatbuffers::Verifier &verifier) const {
     return VerifyTableStart(verifier) &&
            VerifyOffset(verifier, VT_DIMENSIONS) &&
@@ -1667,6 +1671,8 @@
            verifier.VerifyVector(quantizationScales()) &&
            VerifyField<uint32_t>(verifier, VT_QUANTIZATIONDIM) &&
            VerifyField<uint32_t>(verifier, VT_DIMENSIONALITY) &&
+           VerifyOffset(verifier, VT_DIMENSIONSPECIFICITY) &&
+           verifier.VerifyVector(dimensionSpecificity()) &&
            verifier.EndTable();
   }
 };
@@ -1696,6 +1702,9 @@
   void add_dimensionality(uint32_t dimensionality) {
     fbb_.AddElement<uint32_t>(TensorInfo::VT_DIMENSIONALITY, dimensionality, 1);
   }
+  void add_dimensionSpecificity(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity) {
+    fbb_.AddOffset(TensorInfo::VT_DIMENSIONSPECIFICITY, dimensionSpecificity);
+  }
   explicit TensorInfoBuilder(flatbuffers::FlatBufferBuilder &_fbb)
         : fbb_(_fbb) {
     start_ = fbb_.StartTable();
@@ -1716,8 +1725,10 @@
     int32_t quantizationOffset = 0,
     flatbuffers::Offset<flatbuffers::Vector<float>> quantizationScales = 0,
     uint32_t quantizationDim = 0,
-    uint32_t dimensionality = 1) {
+    uint32_t dimensionality = 1,
+    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> dimensionSpecificity = 0) {
   TensorInfoBuilder builder_(_fbb);
+  builder_.add_dimensionSpecificity(dimensionSpecificity);
   builder_.add_dimensionality(dimensionality);
   builder_.add_quantizationDim(quantizationDim);
   builder_.add_quantizationScales(quantizationScales);
@@ -1736,9 +1747,11 @@
     int32_t quantizationOffset = 0,
     const std::vector<float> *quantizationScales = nullptr,
     uint32_t quantizationDim = 0,
-    uint32_t dimensionality = 1) {
+    uint32_t dimensionality = 1,
+    const std::vector<uint8_t> *dimensionSpecificity = nullptr) {
   auto dimensions__ = dimensions ? _fbb.CreateVector<uint32_t>(*dimensions) : 0;
   auto quantizationScales__ = quantizationScales ? _fbb.CreateVector<float>(*quantizationScales) : 0;
+  auto dimensionSpecificity__ = dimensionSpecificity ? _fbb.CreateVector<uint8_t>(*dimensionSpecificity) : 0;
   return armnnSerializer::CreateTensorInfo(
       _fbb,
       dimensions__,
@@ -1747,7 +1760,8 @@
       quantizationOffset,
       quantizationScales__,
       quantizationDim,
-      dimensionality);
+      dimensionality,
+      dimensionSpecificity__);
 }
 
 struct ByteData FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {