Add SPACE_TO_BATCH_ND support to the TfLite parser
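
Register the SPACE_TO_BATCH_ND builtin operator with the parser function
table and implement TfLiteParser::ParseSpaceToBatchND, which reads the
constant block_shape and paddings inputs into an
armnn::SpaceToBatchNdDescriptor (NHWC data layout) and adds a
SpaceToBatchNd layer to the network. Unit tests cover the simple,
multiple-input-batches and padding cases.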

Change-Id: I3bf86d44f811380559ec35eed0bc43b3bd97da80
Signed-off-by: Bruno Goncalves <bruno.slackware@gmail.com>
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 31aab02..e19edc3 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -432,6 +432,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           =  &TfLiteParser::ParseReshape;
     m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   =  &TfLiteParser::ParseResizeBilinear;
     m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
+    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] =  &TfLiteParser::ParseSpaceToBatchND;
     m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
     m_ParserFunctions[tflite::BuiltinOperator_SUB]               =  &TfLiteParser::ParseSub;
     m_ParserFunctions[tflite::BuiltinOperator_ADD]               =  &TfLiteParser::ParseAdd;
@@ -990,6 +991,59 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 3);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
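+    // block_shape (input 1) and paddings (input 2) are expected to be constant int32 tensors,
+    // so their contents are read directly from the model buffers.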
+    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]);
+    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]);
+    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
+
+    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes());
+
+    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes());
+
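+    // paddings is a flattened [M, 2] tensor holding a (before, after) pair for each spatial dimension.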
+    size_t step = 2;
+    std::vector<std::pair<unsigned int, unsigned int>> padList;
+    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+    {
+        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+    }
+
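+    // TFLite tensors use the NHWC data layout, so the descriptor's layout is fixed to NHWC.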
+    armnn::SpaceToBatchNdDescriptor desc;
+    desc.m_BlockShape = blockShape;
+    desc.m_PadList = padList;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+
+    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
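+    // Only the data tensor (input 0) feeds the layer; block_shape and paddings are captured in the descriptor.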
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                      const armnn::TensorInfo & inputTensorInfo)
 {
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index ceca9e2..e074b76 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -105,6 +105,7 @@
     void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
     void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
+    void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
     void ParseSub(size_t subgraphIndex, size_t operatorIndex);
     void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/SpaceToBatchND.cpp b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
new file mode 100644
index 0000000..6ff4f53
--- /dev/null
+++ b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
@@ -0,0 +1,182 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SpaceToBatchNDFixture : public ParserFlatbuffersFixture
+{
+    explicit SpaceToBatchNDFixture(const std::string & inputShape,
+                                   const std::string & outputShape,
+                                   const std::string & blockShapeData,
+                                   const std::string & padListData)
+    {
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "SPACE_TO_BATCH_ND" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                             "shape": )" + outputShape + R"(,
+                             "type": "FLOAT32",
+                             "buffer": 1,
+                             "name": "outputTensor",
+                             "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },
+                        {
+                             "shape": [ 2 ],
+                             "type": "INT32",
+                             "buffer": 2,
+                             "name": "blockShapeTensor",
+                             "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                             }
+                        },
+                        {
+                             "shape": [ 2, 2 ],
+                             "type": "INT32",
+                             "buffer": 3,
+                             "name": "padListTensor",
+                             "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                             }
+                        }
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ 1 ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0, 2, 3 ],
+                            "outputs": [ 1 ],
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { },
+                    { "data": )" + blockShapeData + R"(, },
+                    { "data": )" + padListData + R"(, },
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
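+// Buffer data is given as raw little-endian bytes, so "2,0,0,0" encodes the int32 value 2.
+// The simple test therefore uses a block shape of [ 2, 2 ] and an all-zero pad list.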
+struct SpaceToBatchNDFixtureSimpleTest : public SpaceToBatchNDFixture
+{
+    SpaceToBatchNDFixtureSimpleTest() : SpaceToBatchNDFixture("[ 1, 4, 4, 1 ]",
+                                                              "[ 4, 2, 2, 1 ]",
+                                                              "[ 2,0,0,0, 2,0,0,0 ]",
+                                                              "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdSimpleTest, SpaceToBatchNDFixtureSimpleTest)
+{
+    RunTest<4, armnn::DataType::Float32>
+        (0,
+         {{ "inputTensor",  { 1.0f,  2.0f,  3.0f,  4.0f,
+                              5.0f,  6.0f,  7.0f,  8.0f,
+                              9.0f, 10.0f, 11.0f, 12.0f,
+                             13.0f, 14.0f, 15.0f, 16.0f }}},
+         {{ "outputTensor", { 1.0f, 3.0f,  9.0f, 11.0f,
+                              2.0f, 4.0f, 10.0f, 12.0f,
+                              5.0f, 7.0f, 13.0f, 15.0f,
+                              6.0f, 8.0f, 14.0f, 16.0f }}});
+}
+
+
+struct SpaceToBatchNDFixtureMultipleInputBatchesTest : public SpaceToBatchNDFixture
+{
+    SpaceToBatchNDFixtureMultipleInputBatchesTest() : SpaceToBatchNDFixture("[ 2, 2, 4, 1 ]",
+                                                                            "[ 8, 1, 2, 1 ]",
+                                                                            "[ 2,0,0,0, 2,0,0,0 ]",
+                                                                            "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdMultipleInputBatchesTest, SpaceToBatchNDFixtureMultipleInputBatchesTest)
+{
+    RunTest<4, armnn::DataType::Float32>
+        (0,
+         {{ "inputTensor",  { 1.0f,  2.0f,  3.0f,  4.0f,
+                              5.0f,  6.0f,  7.0f,  8.0f,
+                              9.0f, 10.0f, 11.0f, 12.0f,
+                             13.0f, 14.0f, 15.0f, 16.0f }}},
+         {{ "outputTensor", {  1.0f, 3.0f,  9.0f, 11.0f,
+                               2.0f, 4.0f, 10.0f, 12.0f,
+                               5.0f, 7.0f, 13.0f, 15.0f,
+                               6.0f, 8.0f, 14.0f, 16.0f }}});
+}
+
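+// The pad list bytes decode to [ [1, 0], [2, 0] ]: one row of zero padding before the
+// height dimension and two columns before the width dimension.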
+struct SpaceToBatchNDFixturePaddingTest : public SpaceToBatchNDFixture
+{
+    SpaceToBatchNDFixturePaddingTest() : SpaceToBatchNDFixture("[ 1, 5, 2, 1 ]",
+                                                               "[ 6, 2, 2, 1 ]",
+                                                               "[ 3,0,0,0, 2,0,0,0 ]",
+                                                               "[ 1,0,0,0, 0,0,0,0, 2,0,0,0, 0,0,0,0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdPaddingTest, SpaceToBatchNDFixturePaddingTest)
+{
+    RunTest<4, armnn::DataType::Float32>
+        (0,
+         {{ "inputTensor",  {  1.0f,  2.0f,  3.0f,  4.0f, 5.0f,
+                               6.0f,  7.0f,  8.0f,  9.0f, 10.0f }}},
+         {{ "outputTensor", {  0.0f, 0.0f,
+                               0.0f, 5.0f,
+
+                               0.0f, 0.0f,
+                               0.0f, 6.0f,
+
+                               0.0f, 1.0f,
+                               0.0f, 7.0f,
+
+                               0.0f, 2.0f,
+                               0.0f, 8.0f,
+
+                               0.0f, 3.0f,
+                               0.0f, 9.0f,
+
+                               0.0f, 4.0f,
+                               0.0f, 10.0f, }}});
+}
+
+BOOST_AUTO_TEST_SUITE_END()