IVGCVSW-2645 Add Serializer & Deserializer for Pooling2d

Change-Id: Iba41da3cccd539a0175f2ed0ff9a8b6a23c5fb6f
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Signed-off-by: Saoirse Stewart <saoirse.stewart@arm.com>
diff --git a/src/armnnSerializer/Schema.fbs b/src/armnnSerializer/Schema.fbs
index 411b89a..048181a 100644
--- a/src/armnnSerializer/Schema.fbs
+++ b/src/armnnSerializer/Schema.fbs
@@ -17,6 +17,11 @@
     Boolean = 4
 }
 
+enum DataLayout : byte {
+    NHWC = 0,
+    NCHW = 1
+}
+
 table TensorInfo {
     dimensions:[uint];
     dataType:DataType;
@@ -67,7 +72,8 @@
     Input = 1,
     Multiplication = 2,
     Output = 3,
-    Softmax = 4
+    Pooling2d = 4,
+    Softmax = 5
 }
 
 // Base layer table to be used as part of other layers
@@ -97,6 +103,42 @@
     base:LayerBase;
 }
 
+table Pooling2dLayer {
+    base:LayerBase;
+    descriptor:Pooling2dDescriptor;
+}
+
+enum PoolingAlgorithm : byte {
+    Max = 0,
+    Average = 1,
+    L2 = 2
+}
+
+enum OutputShapeRounding : byte {
+    Floor = 0,
+    Ceiling = 1
+}
+
+enum PaddingMethod : byte {
+    IgnoreValue = 0,
+    Exclude = 1
+}
+
+table Pooling2dDescriptor {
+    poolType:PoolingAlgorithm;
+    padLeft:uint;
+    padRight:uint;
+    padTop:uint;
+    padBottom:uint;
+    poolWidth:uint;
+    poolHeight:uint;
+    strideX:uint;
+    strideY:uint;
+    outputShapeRounding:OutputShapeRounding;
+    paddingMethod:PaddingMethod;
+    dataLayout:DataLayout;
+}
+
 table SoftmaxLayer {
     base:LayerBase;
     descriptor:SoftmaxDescriptor;
@@ -115,6 +157,7 @@
     InputLayer,
     MultiplicationLayer,
     OutputLayer,
+    Pooling2dLayer,
     SoftmaxLayer
 }
 
diff --git a/src/armnnSerializer/Serializer.cpp b/src/armnnSerializer/Serializer.cpp
index ba4b369..57228c4 100644
--- a/src/armnnSerializer/Serializer.cpp
+++ b/src/armnnSerializer/Serializer.cpp
@@ -4,9 +4,15 @@
 //
 
 #include "Serializer.hpp"
+
+#include "SerializerUtils.hpp"
+
 #include <armnn/ArmNN.hpp>
+
 #include <iostream>
+
 #include <Schema_generated.h>
+
 #include <flatbuffers/util.h>
 
 using namespace armnn;
@@ -16,25 +22,6 @@
 namespace armnnSerializer
 {
 
-serializer::DataType GetFlatBufferDataType(DataType dataType)
-{
-    switch (dataType)
-    {
-        case DataType::Float32:
-            return serializer::DataType::DataType_Float32;
-        case DataType::Float16:
-            return serializer::DataType::DataType_Float16;
-        case DataType::Signed32:
-            return serializer::DataType::DataType_Signed32;
-        case DataType::QuantisedAsymm8:
-            return serializer::DataType::DataType_QuantisedAsymm8;
-        case DataType::Boolean:
-            return serializer::DataType::DataType_Boolean;
-        default:
-            return serializer::DataType::DataType_Float16;
-    }
-}
-
 uint32_t SerializerVisitor::GetSerializedId(unsigned int guid)
 {
     std::pair<unsigned int, uint32_t> guidPair(guid, m_layerId);
@@ -140,6 +127,33 @@
     CreateAnyLayer(flatBufferSoftmaxLayer.o, serializer::Layer::Layer_SoftmaxLayer);
 }
 
+void SerializerVisitor::VisitPooling2dLayer(const IConnectableLayer* layer,
+                                            const Pooling2dDescriptor& pooling2dDescriptor,
+                                            const char* name)
+{
+    auto fbPooling2dBaseLayer  = CreateLayerBase(layer, serializer::LayerType::LayerType_Pooling2d);
+    auto fbPooling2dDescriptor = serializer::CreatePooling2dDescriptor(
+        m_flatBufferBuilder,
+        GetFlatBufferPoolingAlgorithm(pooling2dDescriptor.m_PoolType),
+        pooling2dDescriptor.m_PadLeft,
+        pooling2dDescriptor.m_PadRight,
+        pooling2dDescriptor.m_PadTop,
+        pooling2dDescriptor.m_PadBottom,
+        pooling2dDescriptor.m_PoolWidth,
+        pooling2dDescriptor.m_PoolHeight,
+        pooling2dDescriptor.m_StrideX,
+        pooling2dDescriptor.m_StrideY,
+        GetFlatBufferOutputShapeRounding(pooling2dDescriptor.m_OutputShapeRounding),
+        GetFlatBufferPaddingMethod(pooling2dDescriptor.m_PaddingMethod),
+        GetFlatBufferDataLayout(pooling2dDescriptor.m_DataLayout));
+
+    auto fbPooling2dLayer = serializer::CreatePooling2dLayer(m_flatBufferBuilder,
+                                                             fbPooling2dBaseLayer,
+                                                             fbPooling2dDescriptor);
+
+    CreateAnyLayer(fbPooling2dLayer.o, serializer::Layer::Layer_Pooling2dLayer);
+}
+
 fb::Offset<serializer::LayerBase> SerializerVisitor::CreateLayerBase(const IConnectableLayer* layer,
                                                                      const serializer::LayerType layerType)
 {
diff --git a/src/armnnSerializer/Serializer.hpp b/src/armnnSerializer/Serializer.hpp
index ec26dc1..169ed09 100644
--- a/src/armnnSerializer/Serializer.hpp
+++ b/src/armnnSerializer/Serializer.hpp
@@ -60,6 +60,10 @@
                            const armnn::SoftmaxDescriptor& softmaxDescriptor,
                            const char* name = nullptr) override;
 
+    void VisitPooling2dLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::Pooling2dDescriptor& pooling2dDescriptor,
+                             const char* name = nullptr) override;
+
 private:
 
     /// Creates the Input Slots and Output Slots and LayerBase for the layer.
diff --git a/src/armnnSerializer/SerializerSupport.md b/src/armnnSerializer/SerializerSupport.md
index 617eafb..a94e0ad 100644
--- a/src/armnnSerializer/SerializerSupport.md
+++ b/src/armnnSerializer/SerializerSupport.md
@@ -9,5 +9,6 @@
 * Addition
 * Multiplication
 * Softmax
+* Pooling2d
 
 More machine learning layers will be supported in future releases.
\ No newline at end of file
diff --git a/src/armnnSerializer/SerializerUtils.cpp b/src/armnnSerializer/SerializerUtils.cpp
new file mode 100644
index 0000000..5772eab
--- /dev/null
+++ b/src/armnnSerializer/SerializerUtils.cpp
@@ -0,0 +1,83 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SerializerUtils.hpp"
+
+namespace armnnSerializer
+{
+
+using namespace armnn;
+namespace serializer = armnn::armnnSerializer;
+
+serializer::DataType GetFlatBufferDataType(DataType dataType)
+{
+    switch (dataType)
+    {
+        case DataType::Float32:
+            return serializer::DataType::DataType_Float32;
+        case DataType::Float16:
+            return serializer::DataType::DataType_Float16;
+        case DataType::Signed32:
+            return serializer::DataType::DataType_Signed32;
+        case DataType::QuantisedAsymm8:
+            return serializer::DataType::DataType_QuantisedAsymm8;
+        case DataType::Boolean:
+            return serializer::DataType::DataType_Boolean;
+        default:
+            return serializer::DataType::DataType_Float16;
+    }
+}
+
+serializer::DataLayout GetFlatBufferDataLayout(DataLayout dataLayout)
+{
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            return serializer::DataLayout::DataLayout_NHWC;
+        case DataLayout::NCHW:
+        default:
+            return serializer::DataLayout::DataLayout_NCHW;
+    }
+}
+
+serializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(PoolingAlgorithm poolingAlgorithm)
+{
+    switch (poolingAlgorithm)
+    {
+        case PoolingAlgorithm::Average:
+            return serializer::PoolingAlgorithm::PoolingAlgorithm_Average;
+        case PoolingAlgorithm::L2:
+            return serializer::PoolingAlgorithm::PoolingAlgorithm_L2;
+        case PoolingAlgorithm::Max:
+        default:
+            return serializer::PoolingAlgorithm::PoolingAlgorithm_Max;
+    }
+}
+
+serializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(OutputShapeRounding outputShapeRounding)
+{
+    switch (outputShapeRounding)
+    {
+        case OutputShapeRounding::Ceiling:
+            return serializer::OutputShapeRounding::OutputShapeRounding_Ceiling;
+        case OutputShapeRounding::Floor:
+        default:
+            return serializer::OutputShapeRounding::OutputShapeRounding_Floor;
+    }
+}
+
+serializer::PaddingMethod GetFlatBufferPaddingMethod(PaddingMethod paddingMethod)
+{
+    switch (paddingMethod)
+    {
+        case PaddingMethod::IgnoreValue:
+            return serializer::PaddingMethod::PaddingMethod_IgnoreValue;
+        case PaddingMethod::Exclude:
+        default:
+            return serializer::PaddingMethod::PaddingMethod_Exclude;
+    }
+}
+
+} // namespace armnnSerializer
\ No newline at end of file
diff --git a/src/armnnSerializer/SerializerUtils.hpp b/src/armnnSerializer/SerializerUtils.hpp
new file mode 100644
index 0000000..72a8806
--- /dev/null
+++ b/src/armnnSerializer/SerializerUtils.hpp
@@ -0,0 +1,25 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include <Schema_generated.h>
+
+namespace armnnSerializer
+{
+
+armnn::armnnSerializer::DataType GetFlatBufferDataType(armnn::DataType dataType);
+
+armnn::armnnSerializer::DataLayout GetFlatBufferDataLayout(armnn::DataLayout dataLayout);
+
+armnn::armnnSerializer::PoolingAlgorithm GetFlatBufferPoolingAlgorithm(armnn::PoolingAlgorithm poolingAlgorithm);
+
+armnn::armnnSerializer::OutputShapeRounding GetFlatBufferOutputShapeRounding(
+    armnn::OutputShapeRounding outputShapeRounding);
+
+armnn::armnnSerializer::PaddingMethod GetFlatBufferPaddingMethod(armnn::PaddingMethod paddingMethod);
+
+} // namespace armnnSerializer
\ No newline at end of file
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 5b55682..4b6bf1e 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -15,12 +15,34 @@
 #include <vector>
 
 #include <boost/test/unit_test.hpp>
-
 #include <flatbuffers/idl.h>
 
-BOOST_AUTO_TEST_SUITE(SerializerTests)
+using armnnDeserializeParser::IDeserializeParser;
 
-armnnDeserializeParser::IDeserializeParserPtr g_Parser = armnnDeserializeParser::IDeserializeParser::Create();
+namespace
+{
+
+armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString)
+{
+    std::vector<std::uint8_t> const serializerVector{serializerString.begin(), serializerString.end()};
+    return armnnDeserializeParser::IDeserializeParser::Create()->CreateNetworkFromBinary(serializerVector);
+}
+
+std::string SerializeNetwork(const armnn::INetwork& network)
+{
+    armnnSerializer::Serializer serializer;
+    serializer.Serialize(network);
+
+    std::stringstream stream;
+    serializer.SaveSerializedToStream(stream);
+
+    std::string serializerString{stream.str()};
+    return serializerString;
+}
+
+} // anonymous namespace
+
+BOOST_AUTO_TEST_SUITE(SerializerTests)
 
 BOOST_AUTO_TEST_CASE(SimpleNetworkSerialization)
 {
@@ -78,55 +100,47 @@
 
     // Create test network
     armnn::INetworkPtr network = armnn::INetwork::Create();
-    armnn::IConnectableLayer *const inputLayer   = network->AddInputLayer(0);
-    armnn::IConnectableLayer *const softmaxLayer = network->AddSoftmaxLayer(descriptor, "softmax");
-    armnn::IConnectableLayer *const outputLayer  = network->AddOutputLayer(0);
+    armnn::IConnectableLayer* const inputLayer   = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const softmaxLayer = network->AddSoftmaxLayer(descriptor, "softmax");
+    armnn::IConnectableLayer* const outputLayer  = network->AddOutputLayer(0);
 
     inputLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
     softmaxLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
     softmaxLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
-    // Serialize
-    armnnSerializer::Serializer serializer;
-    serializer.Serialize(*network);
-    std::stringstream stream;
-    serializer.SaveSerializedToStream(stream);
-    const std::string serializerString{stream.str()};
-
-    // Deserialize
-    armnn::INetworkPtr deserializedNetwork =
-        g_Parser->CreateNetworkFromBinary({serializerString.begin(), serializerString.end()});
+    // Serialize & deserialize network
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
     BOOST_CHECK(deserializedNetwork);
 
     armnn::IRuntime::CreationOptions options;
-    armnn::IRuntimePtr run = armnn::IRuntime::Create(options);
+    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
 
     armnn::IOptimizedNetworkPtr optimizedNetwork =
-        armnn::Optimize(*network, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+        armnn::Optimize(*network, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
     BOOST_CHECK(optimizedNetwork);
 
     armnn::IOptimizedNetworkPtr deserializedOptimizedNetwork =
-        armnn::Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, run->GetDeviceSpec());
+        armnn::Optimize(*deserializedNetwork, {armnn::Compute::CpuRef}, runtime->GetDeviceSpec());
     BOOST_CHECK(deserializedOptimizedNetwork);
 
     armnn::NetworkId networkId1;
     armnn::NetworkId networkId2;
 
-    run->LoadNetwork(networkId1, std::move(optimizedNetwork));
-    run->LoadNetwork(networkId2, std::move(deserializedOptimizedNetwork));
+    runtime->LoadNetwork(networkId1, std::move(optimizedNetwork));
+    runtime->LoadNetwork(networkId2, std::move(deserializedOptimizedNetwork));
 
     std::vector<float> inputData(tensorInfo.GetNumElements());
     std::iota(inputData.begin(), inputData.end(), 0);
 
     armnn::InputTensors inputTensors1
     {
-         {0, armnn::ConstTensor(run->GetInputTensorInfo(networkId1, 0), inputData.data())}
+         {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId1, 0), inputData.data())}
     };
 
     armnn::InputTensors inputTensors2
     {
-         {0, armnn::ConstTensor(run->GetInputTensorInfo(networkId2, 0), inputData.data())}
+         {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkId2, 0), inputData.data())}
     };
 
     std::vector<float> outputData1(inputData.size());
@@ -134,19 +148,83 @@
 
     armnn::OutputTensors outputTensors1
     {
-         {0, armnn::Tensor(run->GetOutputTensorInfo(networkId1, 0), outputData1.data())}
+         {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId1, 0), outputData1.data())}
     };
 
     armnn::OutputTensors outputTensors2
     {
-         {0, armnn::Tensor(run->GetOutputTensorInfo(networkId2, 0), outputData2.data())}
+         {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkId2, 0), outputData2.data())}
     };
 
-    run->EnqueueWorkload(networkId1, inputTensors1, outputTensors1);
-    run->EnqueueWorkload(networkId2, inputTensors2, outputTensors2);
+    runtime->EnqueueWorkload(networkId1, inputTensors1, outputTensors1);
+    runtime->EnqueueWorkload(networkId2, inputTensors2, outputTensors2);
 
     BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
                                   outputData2.begin(), outputData2.end());
 }
 
+BOOST_AUTO_TEST_CASE(SimplePooling2dIntegration)
+{
+    armnn::NetworkId networkIdentifier;
+    armnn::IRuntime::CreationOptions options; // default options
+    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(options);
+
+    unsigned int inputShape[]  = {1, 2, 2, 1};
+    unsigned int outputShape[] = {1, 1, 1, 1};
+
+    auto inputTensorInfo  = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    armnn::Pooling2dDescriptor desc;
+    desc.m_DataLayout          = armnn::DataLayout::NHWC;
+    desc.m_PadTop              = 0;
+    desc.m_PadBottom           = 0;
+    desc.m_PadLeft             = 0;
+    desc.m_PadRight            = 0;
+    desc.m_PoolType            = armnn::PoolingAlgorithm::Average;
+    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    desc.m_PaddingMethod       = armnn::PaddingMethod::Exclude;
+    desc.m_PoolHeight          = 2;
+    desc.m_PoolWidth           = 2;
+    desc.m_StrideX             = 2;
+    desc.m_StrideY             = 2;
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer *const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer *const pooling2dLayer = network->AddPooling2dLayer(desc, "Pooling2dLayer");
+    armnn::IConnectableLayer *const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(pooling2dLayer->GetInputSlot(0));
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+    pooling2dLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+    pooling2dLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto deserializeNetwork = DeserializeNetwork(SerializeNetwork(*network));
+
+    // Optimize the deserialized network
+    auto deserializedOptimized = Optimize(*deserializeNetwork, {armnn::Compute::CpuRef},
+                                          runtime->GetDeviceSpec());
+
+    // Load graph into runtime
+    runtime->LoadNetwork(networkIdentifier, std::move(deserializedOptimized));
+
+    std::vector<float> input1Data(inputTensorInfo.GetNumElements());
+    std::iota(input1Data.begin(), input1Data.end(), 4);
+
+    armnn::InputTensors inputTensors
+    {
+          {0, armnn::ConstTensor(runtime->GetInputTensorInfo(networkIdentifier, 0), input1Data.data())}
+    };
+
+    std::vector<float> outputData(input1Data.size());
+    armnn::OutputTensors outputTensors
+    {
+           {0, armnn::Tensor(runtime->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
+    };
+
+    runtime->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
+
+    BOOST_CHECK_EQUAL(outputData[0], 5.5);
+}
+
 BOOST_AUTO_TEST_SUITE_END()