Add space-to-batch-nd parser to tf-lite

Change-Id: I3bf86d44f811380559ec35eed0bc43b3bd97da80
Signed-off-by: Bruno Goncalves <bruno.slackware@gmail.com>
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 31aab02..e19edc3 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -432,6 +432,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           =  &TfLiteParser::ParseReshape;
     m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]   =  &TfLiteParser::ParseResizeBilinear;
     m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]           =  &TfLiteParser::ParseSoftmax;
+    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND] =  &TfLiteParser::ParseSpaceToBatchND;
     m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
     m_ParserFunctions[tflite::BuiltinOperator_SUB]               =  &TfLiteParser::ParseSub;
     m_ParserFunctions[tflite::BuiltinOperator_ADD]               =  &TfLiteParser::ParseAdd;
@@ -990,6 +991,54 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex) // Converts a TFLite SPACE_TO_BATCH_ND op into an ArmNN SpaceToBatchNd layer.
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex); // SPACE_TO_BATCH_ND carries 3 inputs: data, block_shape, paddings
+    CHECK_VALID_SIZE(inputs.size(), 3);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    armnn::TensorInfo blockShapeTensorInfo = ToTensorInfo(inputs[1]); // block_shape is read straight from the model buffer below, so it must be a constant tensor
+    BufferRawPtr blockShapeBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+    armnn::TensorInfo padListTensorInfo = ToTensorInfo(inputs[2]); // paddings: likewise expected to be constant — NOTE(review): non-const inputs are not handled here
+    BufferRawPtr padListBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
+
+    std::vector<unsigned int> blockShape(blockShapeTensorInfo.GetNumElements());
+    ::memcpy(blockShape.data(), blockShapeBufferPtr->data.data(), blockShapeTensorInfo.GetNumBytes()); // raw byte copy: assumes 32-bit unsigned int matching TFLite's int32 — TODO confirm
+
+    std::vector<unsigned int> padListVector(padListTensorInfo.GetNumElements());
+    ::memcpy(padListVector.data(), padListBufferPtr->data.data(), padListTensorInfo.GetNumBytes()); // same 32-bit element-size assumption as above
+
+    size_t step = 2;
+    std::vector<std::pair<unsigned int, unsigned int>> padList; // re-pack the flat paddings buffer into (before, after) pairs, one per block dimension
+    for (unsigned int i = 0; i < padListTensorInfo.GetNumElements() / step; ++i)
+    {
+        padList.emplace_back(padListVector[i * step], padListVector[i * step + 1]);
+    }
+
+    armnn::SpaceToBatchNdDescriptor desc;
+    desc.m_BlockShape = blockShape;
+    desc.m_PadList = padList;
+    desc.m_DataLayout = armnn::DataLayout::NHWC; // TFLite tensors use NHWC layout
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+
+    auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex); // unique name: subgraph + operator index
+    IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
+
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]}); // only the data tensor becomes a connection — block_shape/paddings were folded into the descriptor
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
                                                      const armnn::TensorInfo & inputTensorInfo)
 {