IVGCVSW-2646 Add Serializer & Deserializer for Conv2D

 * Added Convolution2dLayer and Convolution2dDescriptor to Schema.fbs
 * Added ConstTensorData serialization and deserialization helper functions
 * Added Convolution2d serialization and deserialization support
 * Added serialization and deserialization unit tests

Change-Id: Id376c08410ae01511972a2b0abdce9cfab907462
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs
index 2b96ad8..cbc7da0 100644
--- a/src/armnnSerializer/Schema.fbs
+++ b/src/armnnSerializer/Schema.fbs
@@ -74,7 +74,8 @@
     Output = 3,
     Pooling2d = 4,
     Reshape = 5,
-    Softmax = 6
+    Softmax = 6,
+    Convolution2d = 7
 }
 
 // Base layer table to be used as part of other layers
@@ -96,6 +97,24 @@
     base:LayerBase;
 }
 
+table Convolution2dLayer {
+    base:LayerBase;
+    descriptor:Convolution2dDescriptor;
+    weights:ConstTensor;
+    biases:ConstTensor;
+}
+
+table Convolution2dDescriptor {
+    padLeft:uint;
+    padRight:uint;
+    padTop:uint;
+    padBottom:uint;
+    strideX:uint;
+    strideY:uint;
+    biasEnabled:bool = false;
+    dataLayout:DataLayout = NCHW;
+}
+
 table InputLayer {
     base:BindableLayerBase;
 }
@@ -164,6 +183,7 @@
 
 union Layer {
     AdditionLayer,
+    Convolution2dLayer,
     InputLayer,
     MultiplicationLayer,
     OutputLayer,
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index b229ae7..f475be1 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -91,6 +91,44 @@
     CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
 }
 
+// Build FlatBuffer for Convolution2dLayer
+void SerializerVisitor::VisitConvolution2dLayer(const IConnectableLayer* layer,
+                                                const Convolution2dDescriptor& descriptor,
+                                                const ConstTensor& weights,
+                                                const Optional<ConstTensor>& biases,
+                                                const char* name)
+{
+    // Create FlatBuffer BaseLayer
+    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Convolution2d);
+
+    auto flatBufferDescriptor = CreateConvolution2dDescriptor(m_flatBufferBuilder,
+                                                              descriptor.m_PadLeft,
+                                                              descriptor.m_PadRight,
+                                                              descriptor.m_PadTop,
+                                                              descriptor.m_PadBottom,
+                                                              descriptor.m_StrideX,
+                                                              descriptor.m_StrideY,
+                                                              descriptor.m_BiasEnabled,
+                                                              GetFlatBufferDataLayout(descriptor.m_DataLayout));
+    auto flatBufferWeightsConstTensorInfo = CreateConstTensorInfo(weights);
+    flatbuffers::Offset<serializer::ConstTensor> flatBufferBiasesConstTensorInfo;
+
+    if (biases.has_value())
+    {
+        flatBufferBiasesConstTensorInfo = CreateConstTensorInfo(biases.value());
+    }
+
+    // Create the FlatBuffer Convolution2dLayer
+    auto flatBufferLayer = CreateConvolution2dLayer(m_flatBufferBuilder,
+                                                    flatBufferBaseLayer,
+                                                    flatBufferDescriptor,
+                                                    flatBufferWeightsConstTensorInfo,
+                                                    flatBufferBiasesConstTensorInfo);
+
+    // Add the AnyLayer to the FlatBufferLayers
+    CreateAnyLayer(flatBufferLayer.o, serializer::Layer::Layer_Convolution2dLayer);
+}
+
 // Build FlatBuffer for Multiplication Layer
 void SerializerVisitor::VisitMultiplicationLayer(const IConnectableLayer* layer, const char* name)
 {
@@ -200,9 +238,78 @@
     m_serializedLayers.push_back(anyLayer);
 }
 
+template <typename T>
+flatbuffers::Offset<flatbuffers::Vector<T>> SerializerVisitor::CreateDataVector(const void* memory, unsigned int size)
+{
+    const T* buffer = reinterpret_cast<const T*>(memory);
+    std::vector<T> vector(buffer, buffer + (size / sizeof(T)));
+    auto fbVector = m_flatBufferBuilder.CreateVector(vector);
+    return fbVector;
+}
+
+flatbuffers::Offset<serializer::ConstTensor> SerializerVisitor::CreateConstTensorInfo(const ConstTensor& constTensor)
+{
+    TensorInfo tensorInfo = constTensor.GetInfo();
+
+    // Get the dimensions
+    std::vector<unsigned int> shape;
+
+    for(unsigned int dim = 0; dim < tensorInfo.GetShape().GetNumDimensions(); ++dim)
+    {
+        shape.push_back(tensorInfo.GetShape()[dim]);
+    }
+
+    // Create FlatBuffer TensorInfo
+    auto flatBufferTensorInfo = serializer::CreateTensorInfo(m_flatBufferBuilder,
+                                                             m_flatBufferBuilder.CreateVector(shape),
+                                                             GetFlatBufferDataType(tensorInfo.GetDataType()),
+                                                             tensorInfo.GetQuantizationScale(),
+                                                             tensorInfo.GetQuantizationOffset());
+    flatbuffers::Offset<void> fbPayload;
+
+    switch (tensorInfo.GetDataType())
+    {
+        case DataType::Float32:
+        case DataType::Signed32:
+        {
+            auto fbVector = CreateDataVector<int32_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+            flatbuffers::Offset<serializer::IntData> flatBuffersData = serializer::CreateIntData(
+                    m_flatBufferBuilder,
+                    fbVector);
+            fbPayload = flatBuffersData.o;
+            break;
+        }
+        case DataType::Float16:
+        {
+            auto fbVector = CreateDataVector<int16_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+            flatbuffers::Offset<serializer::ShortData> flatBuffersData = serializer::CreateShortData(
+                    m_flatBufferBuilder,
+                    fbVector);
+            fbPayload = flatBuffersData.o;
+            break;
+        }
+        case DataType::QuantisedAsymm8:
+        case DataType::Boolean:
+        default:
+        {
+            auto fbVector = CreateDataVector<int8_t>(constTensor.GetMemoryArea(), constTensor.GetNumBytes());
+            flatbuffers::Offset<serializer::ByteData> flatBuffersData = serializer::CreateByteData(
+                    m_flatBufferBuilder,
+                    fbVector);
+            fbPayload = flatBuffersData.o;
+        }
+    }
+    flatbuffers::Offset<serializer::ConstTensor> flatBufferConstTensor = serializer::CreateConstTensor(
+            m_flatBufferBuilder,
+            flatBufferTensorInfo,
+            GetFlatBufferConstTensorData(tensorInfo.GetDataType()),
+            fbPayload);
+    return flatBufferConstTensor;
+}
+
 std::vector<fb::Offset<serializer::InputSlot>> SerializerVisitor::CreateInputSlots(const IConnectableLayer* layer)
 {
-    std::vector<fb::Offset <serializer::InputSlot>> inputSlots;
+    std::vector<fb::Offset<serializer::InputSlot>> inputSlots;
 
     // Get the InputSlots
     for (unsigned int slotIndex = 0; slotIndex<layer->GetNumInputSlots(); ++slotIndex)
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index e4485f5..fd1a792 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -45,6 +45,12 @@
     void VisitAdditionLayer(const armnn::IConnectableLayer* layer,
                             const char* name = nullptr) override;
 
+    void VisitConvolution2dLayer(const armnn::IConnectableLayer* layer,
+                                 const armnn::Convolution2dDescriptor& descriptor,
+                                 const armnn::ConstTensor& weights,
+                                 const armnn::Optional<armnn::ConstTensor>& biases,
+                                 const char* = nullptr) override;
+
     void VisitInputLayer(const armnn::IConnectableLayer* layer,
                          armnn::LayerBindingId id,
                          const char* name = nullptr) override;
@@ -78,6 +84,13 @@
     /// Creates the serializer AnyLayer for the layer and adds it to m_serializedLayers.
     void CreateAnyLayer(const flatbuffers::Offset<void>& layer, const armnn::armnnSerializer::Layer serializerLayer);
 
+    /// Creates the serializer ConstTensor for the armnn ConstTensor.
+    flatbuffers::Offset<armnn::armnnSerializer::ConstTensor> CreateConstTensorInfo(
+            const armnn::ConstTensor& constTensor);
+
+    template <typename T>
+    flatbuffers::Offset<flatbuffers::Vector<T>> CreateDataVector(const void* memory, unsigned int size);
+
     ///Function which maps Guid to an index
     uint32_t GetSerializedId(unsigned int guid);
 
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
index 5772eab..2bad85e 100644
--- a/src/armnnSerializer/SerializerUtils.cpp
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -11,6 +11,23 @@
 using namespace armnn;
 namespace serializer = armnn::armnnSerializer;
 
+serializer::ConstTensorData GetFlatBufferConstTensorData(DataType dataType)
+{
+    switch (dataType)
+    {
+        case DataType::Float32:
+        case DataType::Signed32:
+            return serializer::ConstTensorData::ConstTensorData_IntData;
+        case DataType::Float16:
+            return serializer::ConstTensorData::ConstTensorData_ShortData;
+        case DataType::QuantisedAsymm8:
+        case DataType::Boolean:
+            return serializer::ConstTensorData::ConstTensorData_ByteData;
+        default:
+            return serializer::ConstTensorData::ConstTensorData_NONE;
+    }
+}
+
 serializer::DataType GetFlatBufferDataType(DataType dataType)
 {
     switch (dataType)
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
index 72a8806..06f3076 100644
--- a/src/armnnSerializer/SerializerUtils.hpp
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -11,6 +11,8 @@
 namespace armnnSerializer
 {
 
+armnn::armnnSerializer::ConstTensorData GetFlatBufferConstTensorData(armnn::DataType dataType);
+
 armnn::armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType);
 
 armnn::armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 77bf786..31ef045 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -65,6 +65,87 @@
     BOOST_TEST(stream.str().length() > 0);
 }
 
+BOOST_AUTO_TEST_CASE(Conv2dSerialization)
+{
+    armnn::IRuntime::CreationOptions options; // default options
+    armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+
+    armnnDeserializeParser::IDeserializeParserPtr parser = armnnDeserializeParser::IDeserializeParser::Create();
+
+    armnn::TensorInfo inputInfo(armnn::TensorShape({1, 5, 5, 1}), armnn::DataType::Float32, 1.0f, 0);
+    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32, 4.0f, 0);
+
+    armnn::TensorInfo weightsInfo(armnn::TensorShape({1, 3, 3, 1}), armnn::DataType::Float32, 2.0f, 0);
+
+    std::vector<float> weightsData({4, 5, 6, 0, 0, 0, 3, 2, 1});
+
+    // Construct network
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+
+    armnn::Convolution2dDescriptor descriptor;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_BiasEnabled = false;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::ConstTensor weights(weightsInfo, weightsData);
+
+    armnn::IConnectableLayer* const inputLayer  = network->AddInputLayer(0, "input");
+    armnn::IConnectableLayer* const convLayer   = network->AddConvolution2dLayer(descriptor, weights, "conv");
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0, "output");
+
+    inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    convLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnnSerializer::Serializer serializer;
+    serializer.Serialize(*network);
+
+    std::stringstream stream;
+    serializer.SaveSerializedToStream(stream);
+
+    std::string const serializerString{stream.str()};
+    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+
+    armnn::INetworkPtr deserializedNetwork = parser->CreateNetworkFromBinary(serializerVector);
+
+    auto deserializedOptimized = Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+
+    armnn::NetworkId networkIdentifier;
+
+    // Load graph into runtime
+    run->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+
+    std::vector<float> inputData
+    {
+            1, 5, 2, 3, 5, 8, 7, 3, 6, 3, 3, 3, 9, 1, 9, 4, 1, 8, 1, 3, 6, 8, 1, 9, 2
+    };
+    armnn::InputTensors inputTensors
+    {
+            {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+    };
+
+    std::vector<float> expectedOutputData
+    {
+            23, 33, 24, 91, 99, 48, 26, 50, 19
+    };
+
+    std::vector<float> outputData(9);
+    armnn::OutputTensors outputTensors
+    {
+            {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+    };
+    run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(),
+                                  expectedOutputData.begin(), expectedOutputData.end());
+}
+
 BOOST_AUTO_TEST_CASE(SimpleNetworkWithMultiplicationSerialization)
 {
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);