IVGCVSW-2581 Create Deserializer

	* Add deserialize parser for input, output and add layers
	* Add Unit Tests for simple network

Change-Id: Ia0e2a234896bbe401ed0da5f18c065cb5df51bfb
Signed-off-by: Kevin May <kevin.may@arm.com>
Signed-off-by: Saoirse Stewart <saoirse.stewart@arm.com>
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e8d63b9..ea0f2c3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -159,6 +159,9 @@
     set(armnn_serializer_sources)
     list(APPEND armnn_serializer_sources
         src/armnnSerializer/Schema_generated.h
+        include/armnnDeserializeParser/IDeserializeParser.hpp
+        src/armnnDeserializeParser/DeserializeParser.hpp
+        src/armnnDeserializeParser/DeserializeParser.cpp
         )
 
     add_library_ex(armnnSerializer SHARED ${armnn_serializer_sources})
@@ -539,6 +542,17 @@
             )
     endif()
 
+    if(BUILD_ARMNN_SERIALIZER)
+        enable_language(ASM)
+        list(APPEND unittest_sources
+                src/armnnSerializer/Schema_generated.h
+                src/armnnDeserializeParser/test/DeserializeAdd.cpp
+                src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
+                src/armnnDeserializeParser/test/SchemaSerialize.s
+                )
+        set_source_files_properties(src/armnnDeserializeParser/test/SchemaSerialize.s PROPERTIES COMPILE_FLAGS "-x assembler-with-cpp")
+    endif()
+
     if(BUILD_ONNX_PARSER)
         list(APPEND unittest_sources
             src/armnnOnnxParser/test/Constructor.cpp
@@ -567,6 +581,10 @@
     target_include_directories(UnitTests PRIVATE src/armnnUtils)
     target_include_directories(UnitTests PRIVATE src/backends)
 
+    if(BUILD_ARMNN_SERIALIZER)
+        target_include_directories(UnitTests SYSTEM PRIVATE "${CMAKE_CURRENT_BINARY_DIR}/src/armnnSerializer")
+    endif()
+
     if(BUILD_TF_LITE_PARSER)
         target_include_directories(UnitTests SYSTEM PRIVATE "${TF_LITE_SCHEMA_INCLUDE_PATH}")
     endif()
@@ -594,6 +612,10 @@
         target_link_libraries(UnitTests armnnCaffeParser)
     endif()
 
+    if(BUILD_ARMNN_SERIALIZER)
+        target_link_libraries(UnitTests armnnSerializer)
+    endif()
+
     if(BUILD_TF_LITE_PARSER)
         target_link_libraries(UnitTests armnnTfLiteParser)
     endif()
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index 7c56ca9..6b6a424 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -221,6 +221,12 @@
     endif()
 endif()
 
+if(BUILD_ARMNN_SERIALIZER)
+    include_directories(SYSTEM "${FLATBUFFERS_INCLUDE_PATH}")
+    add_definitions(-DARMNN_SERIALIZER)
+    add_definitions(-DARMNN_SERIALIZER_SCHEMA_PATH="${CMAKE_CURRENT_SOURCE_DIR}/src/armnnSerializer/Schema.fbs")
+endif()
+
 include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
 
 # ARM Compute
diff --git a/include/armnnDeserializeParser/IDeserializeParser.hpp b/include/armnnDeserializeParser/IDeserializeParser.hpp
new file mode 100644
index 0000000..bb9726e
--- /dev/null
+++ b/include/armnnDeserializeParser/IDeserializeParser.hpp
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "armnn/Types.hpp"
+#include "armnn/NetworkFwd.hpp"
+#include "armnn/Tensor.hpp"
+#include "armnn/INetwork.hpp"
+
+#include <memory>
+#include <map>
+#include <vector>
+
+namespace armnnDeserializeParser
+{
+
+using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+class IDeserializeParser;
+using IDeserializeParserPtr = std::unique_ptr<IDeserializeParser, void(*)(IDeserializeParser* parser)>;
+
+class IDeserializeParser
+{
+public:
+    static IDeserializeParser* CreateRaw();
+    static IDeserializeParserPtr Create();
+    static void Destroy(IDeserializeParser* parser);
+
+    /// Create the network from a flatbuffers binary file on disk
+    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) = 0;
+
+    /// Create the network from a flatbuffers binary
+    virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent) = 0;
+
+
+    /// Retrieve binding info (layer id and tensor info) for the network input identified by
+    /// the given layer name and layers id
+    virtual BindingPointInfo GetNetworkInputBindingInfo(unsigned int layerId,
+                                                        const std::string& name) const = 0;
+
+    /// Retrieve binding info (layer id and tensor info) for the network output identified by
+    /// the given layer name and layers id
+    virtual BindingPointInfo GetNetworkOutputBindingInfo(unsigned int layerId,
+                                                         const std::string& name) const = 0;
+
+protected:
+    virtual ~IDeserializeParser() {};
+
+};
+}
\ No newline at end of file
diff --git a/src/armnnDeserializeParser/DeserializeParser.cpp b/src/armnnDeserializeParser/DeserializeParser.cpp
new file mode 100644
index 0000000..ca2e7e3
--- /dev/null
+++ b/src/armnnDeserializeParser/DeserializeParser.cpp
@@ -0,0 +1,587 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DeserializeParser.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <ParserHelper.hpp>
+#include <Permute.hpp>
+#include <VerificationHelpers.hpp>
+
+#include <boost/filesystem.hpp>
+#include <boost/format.hpp>
+#include <boost/core/ignore_unused.hpp>
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+#include <boost/log/trivial.hpp>
+
+// The generated code based on the Serialize schema:
+#include <Schema_generated.h>
+
+#include <fstream>
+
+using armnn::ParseException;
+using namespace armnn;
+using namespace armnn::armnnSerializer;
+
+namespace armnnDeserializeParser {
+
+namespace {
+
+const uint32_t VIRTUAL_LAYER_ID = std::numeric_limits<uint32_t>::max();
+
+ void CheckGraph(const DeserializeParser::GraphPtr& graph,
+                 unsigned int layersIndex,
+                 const CheckLocation& location)
+{
+    if (graph->layers() == nullptr)
+    {
+        throw ParseException(
+                boost::str(
+                        boost::format("%1% was called with invalid (null) graph. "
+                                      "Possible reason is that the graph is not yet loaded and Unpack(ed). "
+                                      "layers:%2% at %3%") %
+                        location.m_Function %
+                        layersIndex %
+                        location.FileLine()));
+    }
+    else if (layersIndex >= graph->layers()->size())
+    {
+        throw ParseException(
+                boost::str(
+                        boost::format("%1% was called with an invalid layers index. "
+                                      "layers:%2% at %3%") %
+                        location.m_Function %
+                        layersIndex %
+                        location.FileLine()));
+    }
+}
+
+void CheckLayers(const DeserializeParser::GraphPtr& graph,
+                 unsigned int layersIndex,
+                 unsigned int layerIndex,
+                 const CheckLocation& location)
+{
+    if (graph->layers() == nullptr)
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with invalid (null) graph. "
+                              "Possible reason is that the graph is not yet loaded and Unpack(ed). "
+                              "layers:%2% at %3%") %
+                location.m_Function %
+                layersIndex %
+                location.FileLine()));
+    }
+    else if (layersIndex >= graph->layers()->size())
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with an invalid layers index. "
+                              "layers:%2% at %3%") %
+                location.m_Function %
+                layersIndex %
+                location.FileLine()));
+    }
+    else if (layerIndex >= graph->layers()->size()
+            && layerIndex != VIRTUAL_LAYER_ID)
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with an invalid layer index. "
+                              "layers:%2% layer:%3% at %4%") %
+                location.m_Function %
+                layersIndex %
+                layerIndex %
+                location.FileLine()));
+    }
+}
+
+void CheckTensorPtr(DeserializeParser::TensorRawPtr rawPtr,
+                    const CheckLocation& location)
+{
+    if (rawPtr == nullptr)
+    {
+        throw ParseException(
+            boost::str(
+                boost::format("%1% was called with a null tensor pointer. "
+                              "at %2%") %
+                location.m_Function %
+                location.FileLine()));
+
+    }
+}
+
+#define CHECK_TENSOR_PTR(TENSOR_PTR) \
+    CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
+
+#define CHECK_LAYERS(GRAPH, LAYERS_INDEX, LAYER_INDEX) \
+    CheckLayers(GRAPH, LAYERS_INDEX, LAYER_INDEX, CHECK_LOCATION())
+
+#define CHECK_GRAPH(GRAPH, LAYERS_INDEX) \
+    CheckGraph(GRAPH, LAYERS_INDEX, CHECK_LOCATION())
+}
+
+DeserializeParser::DeserializeParser()
+: m_Network(nullptr, nullptr),
+//May require LayerType_Max to be included
+m_ParserFunctions(Layer_MAX+1, &DeserializeParser::ParseUnsupportedLayer)
+{
+    // register supported layers
+    m_ParserFunctions[Layer_AdditionLayer]   =  &DeserializeParser::ParseAdd;
+}
+
+DeserializeParser::LayerBaseRawPtr DeserializeParser::GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex)
+{
+    auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
+
+    switch(layerType)
+    {
+        case Layer::Layer_AdditionLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_AdditionLayer()->base();
+        case Layer::Layer_InputLayer:
+           return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->base();
+        case Layer::Layer_OutputLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->base();
+        case Layer::Layer_NONE:
+        default:
+            throw ParseException(boost::str(
+                  boost::format("Layer must have a type %1%") %
+                  Layer::Layer_NONE));
+    }
+}
+
+int32_t DeserializeParser::GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex)
+{
+    auto layerType = graphPtr->layers()->Get(layerIndex)->layer_type();
+
+    if (layerType == Layer::Layer_InputLayer)
+    {
+        return graphPtr->layers()->Get(layerIndex)->layer_as_InputLayer()->base()->layerBindingId();
+    }
+    else if ( layerType == Layer::Layer_OutputLayer )
+    {
+        return graphPtr->layers()->Get(layerIndex)->layer_as_OutputLayer()->base()->layerBindingId();
+    }
+    return 0;
+}
+
+armnn::TensorInfo ToTensorInfo(DeserializeParser::TensorRawPtr tensorPtr)
+{
+    armnn::DataType type;
+    CHECK_TENSOR_PTR(tensorPtr);
+
+    switch (tensorPtr->dataType())
+    {
+        case DataType_QuantisedAsymm8:
+            type = armnn::DataType::QuantisedAsymm8;
+            break;
+        case DataType_Float32:
+            type = armnn::DataType::Float32;
+            break;
+        case DataType_Float16:
+            type = armnn::DataType::Float16;
+            break;
+        case DataType_Boolean:
+            type = armnn::DataType::Boolean;
+            break;
+        default:
+        {
+            CheckLocation location = CHECK_LOCATION();
+            throw ParseException(
+                    boost::str(
+                            boost::format("Unsupported data type %1% = %2%. %3%") %
+                            tensorPtr->dataType() %
+                            EnumNameDataType(tensorPtr->dataType()) %
+                            location.AsString()));
+        }
+    }
+    float quantizationScale = tensorPtr->quantizationScale();
+    int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
+    auto dimensions = tensorPtr->dimensions();
+    unsigned int size = dimensions->size();
+    std::vector<unsigned int> outputDims(dimensions->begin(), dimensions->begin() + size);
+
+    // two statements (on purpose) for easier debugging:
+    armnn::TensorInfo result(size,
+                             outputDims.data(),
+                             type,
+                             quantizationScale,
+                             quantizationOffset);
+    return result;
+}
+
+DeserializeParser::LayerBaseRawPtrVector DeserializeParser::GetGraphInputs(const GraphPtr& graphPtr)
+{
+
+    CHECK_GRAPH(graphPtr, 0);
+    const auto& numInputs = graphPtr->inputIds()->size();
+
+    LayerBaseRawPtrVector result(numInputs);
+
+    for (unsigned int i=0; i<numInputs; ++i)
+    {
+        uint32_t inputId = CHECKED_NON_NEGATIVE(graphPtr->inputIds()->Get(i));
+        result[i] = GetBaseLayer(graphPtr, static_cast<uint32_t>(inputId));
+    }
+    return result;
+}
+
+DeserializeParser::LayerBaseRawPtrVector DeserializeParser::GetGraphOutputs(const GraphPtr& graphPtr)
+{
+    CHECK_GRAPH(graphPtr, 0);
+    const auto& numOutputs = graphPtr->outputIds()->size();
+
+    LayerBaseRawPtrVector result(numOutputs);
+
+    for (unsigned int i=0; i<numOutputs; ++i)
+    {
+        uint32_t outputId = CHECKED_NON_NEGATIVE(graphPtr->outputIds()->Get(i));
+        result[i] = GetBaseLayer(graphPtr, static_cast<uint32_t>(outputId));
+    }
+    return result;
+}
+
+DeserializeParser::TensorRawPtrVector DeserializeParser::GetInputs(const GraphPtr& graphPtr,
+                                                                   unsigned int layerIndex)
+{
+    CHECK_LAYERS(graphPtr, 0, layerIndex);
+    auto layer = GetBaseLayer(graphPtr, layerIndex);
+    const auto& numInputs = layer->inputSlots()->size();
+
+    TensorRawPtrVector result(numInputs);
+
+   for (unsigned int i=0; i<numInputs; ++i)
+   {
+       auto inputId = CHECKED_NON_NEGATIVE(static_cast<int32_t>
+                                          (layer->inputSlots()->Get(i)->connection()->sourceLayerIndex()));
+       result[i] = GetBaseLayer(graphPtr, inputId)->outputSlots()->Get(0)->tensorInfo();
+   }
+   return result;
+}
+
+DeserializeParser::TensorRawPtrVector DeserializeParser::GetOutputs(const GraphPtr& graphPtr,
+                                                                    unsigned int layerIndex)
+{
+    CHECK_LAYERS(graphPtr, 0, layerIndex);
+    auto layer = GetBaseLayer(graphPtr, layerIndex);
+    const auto& numOutputs = layer->outputSlots()->size();
+
+    TensorRawPtrVector result(numOutputs);
+
+    for (unsigned int i=0; i<numOutputs; ++i)
+    {
+        result[i] = layer->outputSlots()->Get(i)->tensorInfo();
+    }
+    return result;
+}
+
+void DeserializeParser::ParseUnsupportedLayer(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    const auto layerName = GetBaseLayer(m_Graph, layerIndex)->layerName()->c_str();
+    throw ParseException(
+        boost::str(
+            boost::format("Layer not supported. "
+                          "layerIndex: %1% "
+                          "layerName: %2% / %3%") %
+            layerIndex %
+            layerName %
+            CHECK_LOCATION().AsString()));
+}
+
+void DeserializeParser::ResetParser()
+{
+    m_Network = armnn::INetworkPtr(nullptr, nullptr);
+    m_Graph = nullptr;
+}
+
+IDeserializeParser* IDeserializeParser::CreateRaw()
+{
+    return new DeserializeParser();
+}
+
+IDeserializeParserPtr IDeserializeParser::Create()
+{
+    return IDeserializeParserPtr(CreateRaw(), &IDeserializeParser::Destroy);
+}
+
+void IDeserializeParser::Destroy(IDeserializeParser* parser)
+{
+    delete parser;
+}
+
+INetworkPtr DeserializeParser::CreateNetworkFromBinaryFile(const char* graphFile)
+{
+    ResetParser();
+    m_Graph = LoadGraphFromFile(graphFile);
+    return CreateNetworkFromGraph();
+}
+
+INetworkPtr DeserializeParser::CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent)
+{
+     ResetParser();
+     m_Graph = LoadGraphFromBinary(binaryContent.data(), binaryContent.size());
+     return CreateNetworkFromGraph();
+}
+
+DeserializeParser::GraphPtr DeserializeParser::LoadGraphFromFile(const char* fileName)
+{
+    if (fileName == nullptr)
+    {
+        throw InvalidArgumentException(boost::str(boost::format("Invalid (null) file name %1%") %
+                                                  CHECK_LOCATION().AsString()));
+    }
+    boost::system::error_code errorCode;
+    boost::filesystem::path pathToFile(fileName);
+    if (!boost::filesystem::exists(pathToFile, errorCode))
+    {
+        throw FileNotFoundException(boost::str(boost::format("Cannot find the file (%1%) errorCode: %2% %3%") %
+                                               fileName %
+                                               errorCode %
+                                               CHECK_LOCATION().AsString()));
+    }
+    std::ifstream file(fileName, std::ios::binary);
+    std::string fileContent((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
+    return LoadGraphFromBinary(reinterpret_cast<const uint8_t*>(fileContent.c_str()), fileContent.size());
+}
+
+DeserializeParser::GraphPtr DeserializeParser::LoadGraphFromBinary(const uint8_t* binaryContent, size_t len)
+{
+    if (binaryContent == nullptr)
+    {
+        throw InvalidArgumentException(boost::str(boost::format("Invalid (null) binary content %1%") %
+                                                  CHECK_LOCATION().AsString()));
+    }
+    flatbuffers::Verifier verifier(binaryContent, len);
+    if (verifier.VerifyBuffer<SerializedGraph>() == false)
+    {
+        throw ParseException(
+                boost::str(boost::format("Buffer doesn't conform to the expected Armnn "
+                                         "flatbuffers format. size:%1% %2%") %
+                           len %
+                           CHECK_LOCATION().AsString()));
+    }
+    return GetSerializedGraph(binaryContent);
+}
+
+INetworkPtr DeserializeParser::CreateNetworkFromGraph()
+{
+    m_Network = INetwork::Create();
+    BOOST_ASSERT(m_Graph != nullptr);
+    unsigned int layerIndex = 0;
+    m_GraphConnections.emplace_back(m_Graph->layers()->size());
+    for (AnyLayer const* layer : *m_Graph->layers())
+    {
+        if (layer->layer_type() != Layer_InputLayer &&
+            layer->layer_type() != Layer_OutputLayer)
+        {
+            // lookup and call the parser function
+            auto& parserFunction = m_ParserFunctions[layer->layer_type()];
+            (this->*parserFunction)(layerIndex);
+        }
+        ++layerIndex;
+    }
+
+    SetupInputLayers();
+    SetupOutputLayers();
+
+    // establish the connections from the layer outputs to the inputs of the subsequent layers
+    for (size_t connectionIndex = 0; connectionIndex < m_GraphConnections[0].size(); ++connectionIndex)
+    {
+        if (m_GraphConnections[0][connectionIndex].outputSlot != nullptr)
+        {
+            for (size_t inputSlotIdx = 0;
+                 inputSlotIdx < m_GraphConnections[0][connectionIndex].inputSlots.size();
+                 ++inputSlotIdx)
+            {
+                m_GraphConnections[0][connectionIndex].outputSlot->Connect(
+                        *(m_GraphConnections[0][connectionIndex].inputSlots[inputSlotIdx]));
+            }
+        }
+    }
+
+    return std::move(m_Network);
+}
+
+BindingPointInfo DeserializeParser::GetNetworkInputBindingInfo(unsigned int layerIndex,
+                                                               const std::string& name) const
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetGraphInputs(m_Graph);
+
+    for (auto const& input : inputs)
+    {
+        if (input->layerName()->c_str() == name)
+        {
+            int bindingId = reinterpret_cast<armnn::LayerBindingId>(GetBindingLayerInfo(m_Graph, input->index()));
+            auto layerBase = GetBaseLayer(m_Graph,input->index())->outputSlots()->Get(layerIndex);
+            return std::make_pair(bindingId, ToTensorInfo(layerBase->tensorInfo()));
+        }
+    }
+    throw ParseException(
+            boost::str(
+                    boost::format("No input binding found for layer:%1% / %2%") %
+                    name %
+                    CHECK_LOCATION().AsString()));
+}
+
+BindingPointInfo DeserializeParser::GetNetworkOutputBindingInfo(unsigned int layerIndex,
+                                                                const std::string& name) const
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto outputs = GetGraphOutputs(m_Graph);
+
+    for (auto const& output : outputs)
+    {
+        if (output->layerName()->c_str() == name)
+        {
+            int bindingId = reinterpret_cast<armnn::LayerBindingId>(GetBindingLayerInfo(m_Graph, output->index()));
+            auto layer = GetBaseLayer(m_Graph, output->index());
+            auto sourceLayerIndex = layer->inputSlots()->Get(0)->connection()->sourceLayerIndex();
+            auto sourceLayer = GetBaseLayer(m_Graph, sourceLayerIndex);
+            return std::make_pair(bindingId, ToTensorInfo(sourceLayer->outputSlots()->Get(0)->tensorInfo()));
+        }
+    }
+    throw ParseException(
+        boost::str(
+            boost::format("No output binding found for layer:%1% / %2%") %
+            name %
+            CHECK_LOCATION().AsString()));
+}
+
+void DeserializeParser::SetupInputLayers()
+{
+    CHECK_GRAPH(m_Graph, 0);
+    auto inputs = GetGraphInputs(m_Graph);
+    for (auto const& input : inputs)
+    {
+        IConnectableLayer* layer =
+            m_Network->AddInputLayer(static_cast<int>(input->index()), input->layerName()->c_str());
+
+        auto tensorInfo = ToTensorInfo(input->outputSlots()->Get(0)->tensorInfo());
+        layer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+        RegisterOutputSlots(input->index(), layer);
+    }
+}
+
+void DeserializeParser::SetupOutputLayers()
+{
+    CHECK_GRAPH(m_Graph, 0);
+    auto outputs = GetGraphOutputs(m_Graph);
+    for (auto const& output : outputs)
+    {
+        IConnectableLayer* layer =
+            m_Network->AddOutputLayer(static_cast<int>(output->index()), output->layerName()->c_str());
+
+        RegisterInputSlots(output->index(), layer);
+    }
+}
+
+void DeserializeParser::RegisterOutputSlots(uint32_t layerIndex,
+                                            IConnectableLayer* layer)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    BOOST_ASSERT(layer != nullptr);
+    auto parsedLayer = GetBaseLayer(m_Graph, layerIndex);
+    if (parsedLayer->outputSlots()->size() != layer->GetNumOutputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of outputslots (%1%) does not match the number expected (%2%)"
+                                     " for layer index: %3% %4%") %
+                       parsedLayer->outputSlots()->size() %
+                       layer->GetNumOutputSlots() %
+                       layerIndex %
+                       CHECK_LOCATION().AsString()));
+    }
+
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumOutputSlots(); ++slotIndex)
+    {
+        armnn::IOutputSlot* slot = &(layer->GetOutputSlot(slotIndex));
+        RegisterOutputSlotOfConnection(layerIndex, slot);
+    }
+}
+
+void DeserializeParser::RegisterInputSlots(uint32_t layerIndex,
+                                           armnn::IConnectableLayer* layer)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    BOOST_ASSERT(layer != nullptr);
+    auto parsedLayer = GetBaseLayer(m_Graph, layerIndex);
+    if (parsedLayer->inputSlots()->size() != layer->GetNumInputSlots())
+    {
+        throw ParseException(
+            boost::str(boost::format("The number of inputslots (%1%) does not match the number expected (%2%)"
+                                     " for layer index:%3% %4%") %
+                       parsedLayer->inputSlots()->size() %
+                       layer->GetNumInputSlots() %
+                       layerIndex %
+                       CHECK_LOCATION().AsString()));
+    }
+
+    for (unsigned int slotIndex = 0; slotIndex < layer->GetNumInputSlots(); ++slotIndex)
+    {
+        armnn::IInputSlot* slot = &(layer->GetInputSlot(slotIndex));
+        uint32_t sourceLayerIndex = parsedLayer->inputSlots()->Get(slotIndex)->connection()->sourceLayerIndex();
+        RegisterInputSlotOfConnection(sourceLayerIndex, slot);
+    }
+}
+
+void DeserializeParser::RegisterInputSlotOfConnection(uint32_t connectionIndex,
+                                                      armnn::IInputSlot* slot)
+{
+    BOOST_ASSERT(m_GraphConnections[0].size() > connectionIndex);
+
+    Slots& slots = m_GraphConnections[0][connectionIndex];
+    slots.inputSlots.push_back(slot);
+}
+
+void DeserializeParser::RegisterOutputSlotOfConnection(uint32_t connectionIndex,
+                                                       armnn::IOutputSlot* slot)
+{
+    BOOST_ASSERT(m_GraphConnections[0].size() > connectionIndex);
+
+    Slots& slots = m_GraphConnections[0][connectionIndex];
+
+    // assuming there is only one producer for that tensor
+    if (slots.outputSlot != nullptr)
+    {
+        throw ParseException(boost::str(
+            boost::format("Another layer has already registered itself as the producer of "
+                          "connection:%1% / %2%") %
+            connectionIndex %
+            CHECK_LOCATION().AsString()));
+    }
+
+    slots.outputSlot = slot;
+}
+
+void DeserializeParser::ParseAdd(unsigned int layerIndex)
+{
+    CHECK_LAYERS(m_Graph, 0, layerIndex);
+    auto inputs = GetInputs(m_Graph, layerIndex);
+    CHECK_LOCATION();
+    CHECK_VALID_SIZE(inputs.size(), 2);
+
+    auto outputs = GetOutputs(m_Graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = boost::str(boost::format("Addition:%1%") % layerIndex);
+    IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(layerIndex, layer);
+    RegisterOutputSlots(layerIndex, layer);
+}
+
+}
+
+
diff --git a/src/armnnDeserializeParser/DeserializeParser.hpp b/src/armnnDeserializeParser/DeserializeParser.hpp
new file mode 100644
index 0000000..322826c
--- /dev/null
+++ b/src/armnnDeserializeParser/DeserializeParser.hpp
@@ -0,0 +1,98 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "armnn/INetwork.hpp"
+#include "armnnDeserializeParser/IDeserializeParser.hpp"
+#include <Schema_generated.h>
+
+namespace armnnDeserializeParser
+{
+class DeserializeParser : public IDeserializeParser
+{
+public:
+    // Shorthands for deserializer types
+    using GraphPtr = const armnn::armnnSerializer::SerializedGraph *;
+    using TensorRawPtr = const armnn::armnnSerializer::TensorInfo *;
+    using TensorRawPtrVector = std::vector<TensorRawPtr>;
+    using LayerRawPtr = const armnn::armnnSerializer::LayerBase *;
+    using LayerBaseRawPtr = const armnn::armnnSerializer::LayerBase *;
+    using LayerBaseRawPtrVector = std::vector<LayerBaseRawPtr>;
+
+public:
+
+    /// Create the network from a flatbuffers binary file on disk
+    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) override;
+
+    virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t>& binaryContent) override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name
+    virtual BindingPointInfo GetNetworkInputBindingInfo(unsigned int layerId,
+                                                        const std::string& name) const override;
+
+    /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name
+    virtual BindingPointInfo GetNetworkOutputBindingInfo(unsigned int layerId,
+                                                         const std::string& name) const override;
+
+    DeserializeParser();
+    ~DeserializeParser() {}
+
+public:
+    // testable helpers
+    static GraphPtr LoadGraphFromFile(const char* fileName);
+    static GraphPtr LoadGraphFromBinary(const uint8_t* binaryContent, size_t len);
+    static TensorRawPtrVector GetInputs(const GraphPtr& graph, unsigned int layerIndex);
+    static TensorRawPtrVector GetOutputs(const GraphPtr& graph, unsigned int layerIndex);
+    static LayerBaseRawPtrVector GetGraphInputs(const GraphPtr& graphPtr);
+    static LayerBaseRawPtrVector GetGraphOutputs(const GraphPtr& graphPtr);
+    static LayerBaseRawPtr GetBaseLayer(const GraphPtr& graphPtr, unsigned int layerIndex);
+    static int32_t GetBindingLayerInfo(const GraphPtr& graphPtr, unsigned int layerIndex);
+
+private:
+    // No copying allowed until it is wanted and properly implemented
+    DeserializeParser(const DeserializeParser&) = delete;
+    DeserializeParser& operator=(const DeserializeParser&) = delete;
+
+    /// Create the network from an already loaded flatbuffers graph
+    armnn::INetworkPtr CreateNetworkFromGraph();
+
+    // signature for the parser functions
+    using LayerParsingFunction = void(DeserializeParser::*)(unsigned int layerIndex);
+
+    void ParseUnsupportedLayer(unsigned int serializeGraphIndex);
+    void ParseAdd(unsigned int serializeGraphIndex);
+
+    void RegisterOutputSlotOfConnection(uint32_t connectionIndex, armnn::IOutputSlot* slot);
+    void RegisterInputSlotOfConnection(uint32_t connectionIndex, armnn::IInputSlot* slot);
+    void RegisterInputSlots(uint32_t layerIndex,
+                            armnn::IConnectableLayer* layer);
+    void RegisterOutputSlots(uint32_t layerIndex,
+                             armnn::IConnectableLayer* layer);
+    void ResetParser();
+
+    void SetupInputLayers();
+    void SetupOutputLayers();
+
+    /// The network we're building. Gets cleared after it is passed to the user
+    armnn::INetworkPtr                    m_Network;
+    GraphPtr                              m_Graph;
+    std::vector<LayerParsingFunction>     m_ParserFunctions;
+
+    /// A mapping of an output slot to each of the input slots it should be connected to
+    /// The outputSlot is from the layer that creates this tensor as one of its outputs
+    /// The inputSlots are from the layers that use this tensor as one of their inputs
+    struct Slots
+    {
+        armnn::IOutputSlot* outputSlot;
+        std::vector<armnn::IInputSlot*> inputSlots;
+
+        Slots() : outputSlot(nullptr) { }
+    };
+    typedef std::vector<Slots> Connection;
+    std::vector<Connection>   m_GraphConnections;
+};
+
+}
diff --git a/src/armnnDeserializeParser/DeserializerSupport.md b/src/armnnDeserializeParser/DeserializerSupport.md
new file mode 100644
index 0000000..7135003
--- /dev/null
+++ b/src/armnnDeserializeParser/DeserializerSupport.md
@@ -0,0 +1,11 @@
+# The layers that ArmNN SDK Deserializer currently supports.
+
+This reference guide provides a list of layers which can be deserialized currently by the Arm NN SDK.
+
+## Fully supported
+
+The Arm NN SDK Deserialize parser currently supports the following layers:
+
+* Addition
+
+More machine learning layers will be supported in future releases.
diff --git a/src/armnnDeserializeParser/README.md b/src/armnnDeserializeParser/README.md
new file mode 100644
index 0000000..56eca53
--- /dev/null
+++ b/src/armnnDeserializeParser/README.md
@@ -0,0 +1,7 @@
+# The Arm NN Deserialize parser
+
+The `armnnDeserializeParser` is a library for loading neural networks defined by Arm NN FlatBuffers files
+into the Arm NN runtime.
+
+For more information about the layers that are supported, and the networks that have been tested,
+see [DeserializerSupport.md](./DeserializerSupport.md)
\ No newline at end of file
diff --git a/src/armnnDeserializeParser/test/DeserializeAdd.cpp b/src/armnnDeserializeParser/test/DeserializeAdd.cpp
new file mode 100644
index 0000000..03d50e3
--- /dev/null
+++ b/src/armnnDeserializeParser/test/DeserializeAdd.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../DeserializeParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(DeserializeParser)
+
+// Fixture that builds a serialized Arm NN network as JSON: two input layers
+// feeding an "AdditionLayer", whose single output feeds an output layer.
+// Tensor shapes and the data type are injected as strings so each derived
+// fixture can vary them. Setup() (from the base fixture) converts the JSON
+// to a FlatBuffers binary and loads it into the runtime.
+struct AddFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit AddFixture(const std::string & inputShape1,
+                        const std::string & inputShape2,
+                        const std::string & outputShape,
+                        const std::string & dataType,
+                        const std::string & activation="NONE") // NOTE(review): not referenced in the JSON below — confirm intended
+    {
+        // JSON form of the armnnSerializer schema; shape/type strings are
+        // spliced directly into the raw-string literal.
+        m_JsonString = R"(
+        {
+                inputIds: [0, 1],
+                outputIds: [3],
+                layers: [
+                {
+                    layer_type: "InputLayer",
+                    layer: {
+                          base: {
+                                layerBindingId: 0,
+                                base: {
+                                    index: 0,
+                                    layerName: "InputLayer1",
+                                    layerType: "Input",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [ {
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + inputShape1 + R"(,
+                                            dataType: )" + dataType + R"(
+                                        },
+                                    }],
+                                 },}},
+                },
+                {
+                layer_type: "InputLayer",
+                layer: {
+                       base: {
+                            layerBindingId: 1,
+                            base: {
+                                  index:1,
+                                  layerName: "InputLayer2",
+                                  layerType: "Input",
+                                  inputSlots: [{
+                                      index: 0,
+                                      connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                  }],
+                                  outputSlots: [ {
+                                      index: 0,
+                                      tensorInfo: {
+                                          dimensions: )" + inputShape2 + R"(,
+                                          dataType: )" + dataType + R"(
+                                      },
+                                  }],
+                                },}},
+                },
+                {
+                layer_type: "AdditionLayer",
+                layer : {
+                        base: {
+                             index:2,
+                             layerName: "AdditionLayer",
+                             layerType: "Addition",
+                             inputSlots: [
+                                            {
+                                             index: 0,
+                                             connection: {sourceLayerIndex:0, outputSlotIndex:0 },
+                                            },
+                                            {
+                                             index: 1,
+                                             connection: {sourceLayerIndex:1, outputSlotIndex:0 },
+                                            }
+                             ],
+                             outputSlots: [ {
+                                 index: 0,
+                                 tensorInfo: {
+                                     dimensions: )" + outputShape + R"(,
+                                     dataType: )" + dataType + R"(
+                                 },
+                             }],
+                            }},
+                },
+                {
+                layer_type: "OutputLayer",
+                layer: {
+                        base:{
+                              layerBindingId: 3,
+                              base: {
+                                    index: 3,
+                                    layerName: "OutputLayer",
+                                    layerType: "Output",
+                                    inputSlots: [{
+                                        index: 0,
+                                        connection: {sourceLayerIndex:2, outputSlotIndex:0 },
+                                    }],
+                                    outputSlots: [ {
+                                        index: 0,
+                                        tensorInfo: {
+                                            dimensions: )" + outputShape + R"(,
+                                            dataType: )" + dataType + R"(
+                                        },
+                                }],
+                            }}},
+                }]
+         }
+        )";
+        // Parse the JSON against the schema and load the resulting network.
+        Setup();
+    }
+};
+
+
+// 2x2 addition network using quantised 8-bit data.
+struct SimpleAddFixture : AddFixture
+{
+    SimpleAddFixture()
+        : AddFixture("[ 2, 2 ]", "[ 2, 2 ]", "[ 2, 2 ]", "QuantisedAsymm8")
+    {
+    }
+};
+
+// 2x2x1x1 addition network using 32-bit float data.
+struct SimpleAddFixture2 : AddFixture
+{
+    SimpleAddFixture2()
+        : AddFixture("[ 2, 2, 1, 1 ]", "[ 2, 2, 1, 1 ]", "[ 2, 2, 1, 1 ]", "Float32")
+    {
+    }
+};
+
+// Deserialize the QuantisedAsymm8 network and check element-wise sums.
+BOOST_FIXTURE_TEST_CASE(AddQuantisedAsymm8, SimpleAddFixture)
+{
+    RunTest<2, armnn::DataType::QuantisedAsymm8>(
+        0,
+        {{"InputLayer1", { 0, 1, 2, 3 }},
+         {"InputLayer2", { 4, 5, 6, 7 }}},
+        {{"OutputLayer", { 4, 6, 8, 10 }}});
+}
+
+// Deserialize the Float32 network and check element-wise sums.
+BOOST_FIXTURE_TEST_CASE(AddFloat32, SimpleAddFixture2)
+{
+    RunTest<4, armnn::DataType::Float32>(
+        0,
+        {{"InputLayer1", { 111, 85, 226, 3 }},
+         {"InputLayer2", {   5,   8,  10, 12 }}},
+        {{"OutputLayer", { 116, 93, 236, 15 }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
new file mode 100644
index 0000000..5d8c377
--- /dev/null
+++ b/src/armnnDeserializeParser/test/ParserFlatbuffersSerializeFixture.hpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "SchemaSerialize.hpp"
+
+#include <armnn/IRuntime.hpp>
+#include <armnnDeserializeParser/IDeserializeParser.hpp>
+
+#include <boost/assert.hpp>
+#include <boost/format.hpp>
+
+#include "TypeUtils.hpp"
+#include "test/TensorHelpers.hpp"
+
+#include "flatbuffers/idl.h"
+#include "flatbuffers/util.h"
+
+#include <Schema_generated.h>
+
+using armnnDeserializeParser::IDeserializeParser;
+using TensorRawPtr =  armnn::armnnSerializer::TensorInfo*;
+
+/// Test fixture shared by deserialize-parser tests. A derived fixture fills
+/// m_JsonString with a JSON representation of the armnnSerializer schema and
+/// calls Setup(); Setup() converts it to a FlatBuffers binary, builds an
+/// INetwork through the parser and loads it into a CpuRef runtime so RunTest()
+/// can enqueue workloads against it.
+struct ParserFlatbuffersSerializeFixture
+{
+    ParserFlatbuffersSerializeFixture() :
+        m_Parser(IDeserializeParser::Create()),
+        m_Runtime(armnn::IRuntime::Create(armnn::IRuntime::CreationOptions())),
+        m_NetworkIdentifier(-1)
+    {
+    }
+
+    std::vector<uint8_t> m_GraphBinary;       // FlatBuffers binary produced from m_JsonString
+    std::string m_JsonString;                 // set by the derived fixture before Setup()
+    std::unique_ptr<IDeserializeParser, void (*)(IDeserializeParser* parser)> m_Parser;
+    armnn::IRuntimePtr m_Runtime;
+    armnn::NetworkId m_NetworkIdentifier;     // assigned by LoadNetwork() in Setup()
+
+    /// If the single-input-single-output overload of Setup() is called, these will store the input and output name
+    /// so they don't need to be passed to the single-input-single-output overload of RunTest().
+    std::string m_SingleInputName;
+    std::string m_SingleOutputName;
+
+    /// Converts m_JsonString to binary, parses it into a network, optimizes it
+    /// for CpuRef and loads it into the runtime. Throws armnn::Exception on any
+    /// failure along the way.
+    void Setup()
+    {
+        bool ok = ReadStringToBinary();
+        if (!ok)
+        {
+            throw armnn::Exception("LoadNetwork failed while reading binary input");
+        }
+
+        armnn::INetworkPtr network =
+                m_Parser->CreateNetworkFromBinary(m_GraphBinary);
+
+        if (!network)
+        {
+            throw armnn::Exception("The parser failed to create an ArmNN network");
+        }
+
+        auto optimized = Optimize(*network, {armnn::Compute::CpuRef},
+                                  m_Runtime->GetDeviceSpec());
+
+        std::string errorMessage;
+        armnn::Status ret = m_Runtime->LoadNetwork(m_NetworkIdentifier, move(optimized), errorMessage);
+
+        if (ret != armnn::Status::Success)
+        {
+            throw armnn::Exception(
+                    boost::str(
+                            boost::format("The runtime failed to load the network. "
+                                          "Error was: %1%. in %2% [%3%:%4%]") %
+                            errorMessage %
+                            __func__ %
+                            __FILE__ %
+                            __LINE__));
+        }
+
+    }
+
+    void SetupSingleInputSingleOutput(const std::string& inputName, const std::string& outputName)
+    {
+        // Store the input and output name so they don't need to be passed to the single-input-single-output RunTest().
+        m_SingleInputName = inputName;
+        m_SingleOutputName = outputName;
+        Setup();
+    }
+
+    /// Parses m_JsonString against the embedded schema (see SchemaSerialize.s)
+    /// and stores the resulting FlatBuffers binary in m_GraphBinary.
+    /// Returns false if either the schema or the JSON fails to parse.
+    bool ReadStringToBinary()
+    {
+        // The schema text is linked into this binary between these two symbols.
+        std::string schemafile(&deserialize_schema_start, &deserialize_schema_end);
+
+        // parse schema first, so we can use it to parse the data after
+        flatbuffers::Parser parser;
+
+        bool ok = parser.Parse(schemafile.c_str());
+        BOOST_ASSERT_MSG(ok, "Failed to parse schema file");
+
+        ok &= parser.Parse(m_JsonString.c_str());
+        BOOST_ASSERT_MSG(ok, "Failed to parse json input");
+
+        if (!ok)
+        {
+            return false;
+        }
+
+        {
+            const uint8_t* bufferPtr = parser.builder_.GetBufferPointer();
+            size_t size = static_cast<size_t>(parser.builder_.GetSize());
+            m_GraphBinary.assign(bufferPtr, bufferPtr+size);
+        }
+        return ok;
+    }
+
+    /// Executes the network with the given input tensor and checks the result against the given output tensor.
+    /// This overload assumes the network has a single input and a single output.
+    template <std::size_t NumOutputDimensions,
+              armnn::DataType ArmnnType,
+              typename DataType = armnn::ResolveType<ArmnnType>>
+    void RunTest(unsigned int layersId,
+                 const std::vector<DataType>& inputData,
+                 const std::vector<DataType>& expectedOutputData);
+
+    /// Executes the network with the given input tensors and checks the results against the given output tensors.
+    /// This overload supports multiple inputs and multiple outputs, identified by name.
+    template <std::size_t NumOutputDimensions,
+              armnn::DataType ArmnnType,
+              typename DataType = armnn::ResolveType<ArmnnType>>
+    void RunTest(unsigned int layersId,
+                 const std::map<std::string, std::vector<DataType>>& inputData,
+                 const std::map<std::string, std::vector<DataType>>& expectedOutputData);
+
+    /// Compares a serialized TensorInfo against the expected shape, data type
+    /// and quantization parameters.
+    /// NOTE(review): tensorType is taken by value — consider const& to avoid a copy.
+    void CheckTensors(const TensorRawPtr& tensors, size_t shapeSize, const std::vector<int32_t>& shape,
+                      armnn::armnnSerializer::TensorInfo tensorType, const std::string& name,
+                      const float scale, const int64_t zeroPoint)
+    {
+        BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
+        BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
+                                      tensors->dimensions()->begin(), tensors->dimensions()->end());
+        BOOST_CHECK_EQUAL(tensorType.dataType(), tensors->dataType());
+        BOOST_CHECK_EQUAL(scale, tensors->quantizationScale());
+        BOOST_CHECK_EQUAL(zeroPoint, tensors->quantizationOffset());
+    }
+};
+
+// Single-input/single-output convenience overload: wraps the data in
+// one-element maps keyed by the names stored by SetupSingleInputSingleOutput()
+// and forwards to the multi-tensor overload.
+template <std::size_t NumOutputDimensions,
+          armnn::DataType ArmnnType,
+          typename DataType>
+void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
+                                                const std::vector<DataType>& inputData,
+                                                const std::vector<DataType>& expectedOutputData)
+{
+    std::map<std::string, std::vector<DataType>> inputs{ { m_SingleInputName, inputData } };
+    std::map<std::string, std::vector<DataType>> outputs{ { m_SingleOutputName, expectedOutputData } };
+    RunTest<NumOutputDimensions, ArmnnType>(layersId, inputs, outputs);
+}
+
+/// Multi-tensor overload: binds each named input/output through the parser,
+/// runs the loaded network on the CpuRef runtime, and compares every output
+/// tensor against its expected values.
+template <std::size_t NumOutputDimensions,
+          armnn::DataType ArmnnType,
+          typename DataType>
+void ParserFlatbuffersSerializeFixture::RunTest(unsigned int layersId,
+                                                const std::map<std::string, std::vector<DataType>>& inputData,
+                                                const std::map<std::string, std::vector<DataType>>& expectedOutputData)
+{
+    using BindingPointInfo = std::pair<armnn::LayerBindingId, armnn::TensorInfo>;
+
+    // Setup the armnn input tensors from the given vectors.
+    armnn::InputTensors inputTensors;
+    for (auto&& it : inputData)
+    {
+        BindingPointInfo bindingInfo = m_Parser->GetNetworkInputBindingInfo(layersId, it.first);
+        // Fails the test if the bound tensor's data type doesn't match ArmnnType.
+        armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+        inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data()) });
+    }
+
+    // Allocate storage for the output tensors to be written to and setup the armnn output tensors.
+    std::map<std::string, boost::multi_array<DataType, NumOutputDimensions>> outputStorage;
+    armnn::OutputTensors outputTensors;
+    for (auto&& it : expectedOutputData)
+    {
+        BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(layersId, it.first);
+        armnn::VerifyTensorInfoDataType<ArmnnType>(bindingInfo.second);
+        outputStorage.emplace(it.first, MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second));
+        outputTensors.push_back(
+                { bindingInfo.first, armnn::Tensor(bindingInfo.second, outputStorage.at(it.first).data()) });
+    }
+
+    // Run inference; results are written into outputStorage via the bound pointers.
+    m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors);
+
+    // Compare each output tensor to the expected values
+    for (auto&& it : expectedOutputData)
+    {
+        BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(layersId, it.first);
+        auto outputExpected = MakeTensor<DataType, NumOutputDimensions>(bindingInfo.second, it.second);
+        BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+    }
+}
diff --git a/src/armnnDeserializeParser/test/SchemaSerialize.hpp b/src/armnnDeserializeParser/test/SchemaSerialize.hpp
new file mode 100644
index 0000000..ec7e6ba
--- /dev/null
+++ b/src/armnnDeserializeParser/test/SchemaSerialize.hpp
@@ -0,0 +1,9 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+extern "C" {
+extern const char deserialize_schema_start;
+extern const char deserialize_schema_end;
+}
diff --git a/src/armnnDeserializeParser/test/SchemaSerialize.s b/src/armnnDeserializeParser/test/SchemaSerialize.s
new file mode 100644
index 0000000..dbbb7db
--- /dev/null
+++ b/src/armnnDeserializeParser/test/SchemaSerialize.s
@@ -0,0 +1,13 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+// Embed the textual FlatBuffers schema into the read-only data section,
+// delimited by the two global symbols declared in SchemaSerialize.hpp.
+.section .rodata
+
+.global deserialize_schema_start
+.global deserialize_schema_end
+
+// ARMNN_SERIALIZER_SCHEMA_PATH is expected to be supplied by the build as a
+// preprocessor definition (the file is assembled with -x assembler-with-cpp,
+// see CMakeLists.txt) — TODO confirm it expands to a quoted path.
+deserialize_schema_start:
+.incbin ARMNN_SERIALIZER_SCHEMA_PATH
+deserialize_schema_end: