IVGCVSW-2848 - Add TfLite Parser support for Unpack layer

* Added ParseUnpack in TfLiteParser
* New Unpack test file with test reproducing unpack in DeepSpeechV1 model
* Added documentation for supported Unpack to TensorFlowLiteSupport.md

Signed-off-by: Nina Drozd <nina.drozd@arm.com>
Change-Id: Ie920d46254ff4b4ab544407ace4c1d489af83157
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3febe7b..e9172f2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -521,6 +521,7 @@
              src/armnnTfLiteParser/test/Squeeze.cpp
              src/armnnTfLiteParser/test/StridedSlice.cpp
              src/armnnTfLiteParser/test/Sub.cpp
+             src/armnnTfLiteParser/test/Unpack.cpp
              src/armnnTfLiteParser/test/LoadModel.cpp
              src/armnnTfLiteParser/test/GetBuffer.cpp
              src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 821aecc..dc16334 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -58,6 +58,8 @@
 
 * TANH
 
+* UNPACK
+
 ## Custom Operator
 
 * TFLite_Detection_PostProcess
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index f689dee..86688ad 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -468,6 +468,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_PAD]               =  &TfLiteParser::ParsePad;
     m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             =  &TfLiteParser::ParseSplit;
     m_ParserFunctions[tflite::BuiltinOperator_TANH]              =  &TfLiteParser::ParseTanH;
+    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]            =  &TfLiteParser::ParseUnpack;
 }
 
 void TfLiteParser::ResetParser()
@@ -1867,6 +1868,83 @@
                                                               outputTensorIndexes[3]});
 }
 
+void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsUnpackOptions();
+
+    // This unpackAxis indicates the axis to unpack
+    const unsigned int unpackAxis = CHECKED_NON_NEGATIVE(options->axis);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
+    unsigned int unpackNum = CHECKED_NON_NEGATIVE(options->num);
+    // If num is not defined, automatically infer from the length of the dimension axis.
+    if(unpackNum == 0)
+    {
+        unpackNum = inputTensorInfo.GetShape()[unpackAxis];
+    }
+
+    // If unpack number cannot be inferred and is still zero, throw ParseException.
+    if(unpackNum == 0)
+    {
+        throw ParseException("Number to unpack must greater than zero.");
+    }
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), unpackNum);
+
+    auto inputDimSize = inputTensorInfo.GetNumDimensions();
+    std::vector<unsigned int> unpackDimSizes(inputDimSize);
+
+    // Add current input shape to unpackDimSizes
+    for (unsigned int i = 0; i < inputDimSize; ++i)
+    {
+        unpackDimSizes[i] = inputTensorInfo.GetShape()[i];
+    }
+
+    if (unpackDimSizes[unpackAxis] != unpackNum)
+    {
+        throw ParseException("Number to unpack must be the same as length of the dimension to "
+                             "unpack along.");
+    }
+
+    unpackDimSizes[unpackAxis] /= unpackNum;
+
+    SplitterDescriptor splitDesc(unpackNum, static_cast<unsigned int>(unpackDimSizes.size()));
+    for (unsigned int j = 0; j < unpackNum; ++j)
+    {
+        // Set the size of the views.
+        for (unsigned int dimIdx = 0; dimIdx < unpackDimSizes.size(); ++dimIdx)
+        {
+            splitDesc.SetViewSize(j, dimIdx, unpackDimSizes[dimIdx]);
+        }
+        splitDesc.SetViewOriginCoord(j, unpackAxis, unpackDimSizes[unpackAxis] * j);
+    }
+
+    auto layerName = boost::str(boost::format("Unpack:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddSplitterLayer(splitDesc, layerName.c_str());
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    TensorShape outShape = TensorShape(static_cast<unsigned int>(unpackDimSizes.size()),
+        unpackDimSizes.data());
+
+    for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
+    {
+        layer->GetOutputSlot(k).SetTensorInfo(armnn::TensorInfo(outShape,
+            inputTensorInfo.GetDataType()));
+    }
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -1876,6 +1954,12 @@
 
     const unsigned int numSplits = CHECKED_NON_NEGATIVE(options->num_splits);
 
+    // If number of splits cannot be inferred and is zero, throw ParseException.
+    if(numSplits == 0)
+    {
+        throw ParseException("Number to splits must greater than zero.");
+    }
+
     auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(inputs.size(), 2);
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index a3ef22f..929af1f 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -91,6 +91,7 @@
 
     void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
     void ParseActivation(size_t subgraphIndex, size_t operatorIndex, armnn::ActivationFunction activationType);
+    void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
     void ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex);
     void ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex);
     void ParseConcatenation(size_t subgraphIndex, size_t operatorIndex);
@@ -101,23 +102,23 @@
     void ParseLogistic(size_t subgraphIndex, size_t operatorIndex);
     void ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex);
     void ParseMaximum(size_t subgraphIndex, size_t operatorIndex);
+    void ParseMean(size_t subgraphIndex, size_t operatorIndex);
     void ParseMinimum(size_t subgraphIndex, size_t operatorIndex);
+    void ParseMul(size_t subgraphIndex, size_t operatorIndex);
+    void ParsePad(size_t subgraphIndex, size_t operatorIndex);
+    void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
     void ParseRelu(size_t subgraphIndex, size_t operatorIndex);
     void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
     void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
     void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
+    void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
     void ParseSqueeze(size_t subgraphIndex, size_t operatorIndex);
     void ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex);
     void ParseSub(size_t subgraphIndex, size_t operatorIndex);
-    void ParseAdd(size_t subgraphIndex, size_t operatorIndex);
-    void ParseMul(size_t subgraphIndex, size_t operatorIndex);
-    void ParseMean(size_t subgraphIndex, size_t operatorIndex);
-    void ParsePad(size_t subgraphIndex, size_t operatorIndex);
-    void ParsePool(size_t subgraphIndex, size_t operatorIndex, armnn::PoolingAlgorithm algorithm);
-    void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
     void ParseTanH(size_t subgraphIndex, size_t operatorIndex);
+    void ParseUnpack(size_t subgraphIndex, size_t operatorIndex);
 
     void RegisterProducerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IOutputSlot* slot);
     void RegisterConsumerOfTensor(size_t subgraphIndex, size_t tensorIndex, armnn::IInputSlot* slot);
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
new file mode 100644
index 0000000..10e682e
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -0,0 +1,119 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+#include <string>
+#include <iostream>
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct UnpackFixture : public ParserFlatbuffersFixture
+{
+    explicit UnpackFixture(const std::string & inputShape,
+                           const unsigned int numberOfOutputs,
+                           const std::string & outputShape,
+                           const std::string & axis,
+                           const std::string & num)
+    {
+        // As input index is 0, output indexes start at 1
+        std::string outputIndexes = "1";
+        for(unsigned int i = 1; i < numberOfOutputs; i++)
+        {
+            outputIndexes += ", " + std::to_string(i+1);
+        }
+        m_JsonString = R"(
+            {
+                "version": 3,
+                "operator_codes": [ { "builtin_code": "UNPACK" } ],
+                "subgraphs": [ {
+                    "tensors": [
+                        {
+                            "shape": )" + inputShape + R"(,
+                            "type": "FLOAT32",
+                            "buffer": 0,
+                            "name": "inputTensor",
+                            "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },)";
+        // Append the required number of outputs for this UnpackFixture.
+        // As input index is 0, output indexes start at 1.
+        for(unsigned int i = 0; i < numberOfOutputs; i++)
+        {
+            m_JsonString += R"(
+                        {
+                            "shape": )" + outputShape + R"( ,
+                                "type": "FLOAT32",
+                                "buffer": )" + std::to_string(i + 1) + R"(,
+                                "name": "outputTensor)" + std::to_string(i + 1) + R"(",
+                                "quantization": {
+                                "min": [ 0.0 ],
+                                "max": [ 255.0 ],
+                                "scale": [ 1.0 ],
+                                "zero_point": [ 0 ],
+                            }
+                        },)";
+        }
+        m_JsonString += R"(
+                    ],
+                    "inputs": [ 0 ],
+                    "outputs": [ )" + outputIndexes + R"( ],
+                    "operators": [
+                        {
+                            "opcode_index": 0,
+                            "inputs": [ 0 ],
+                            "outputs": [ )" + outputIndexes + R"( ],
+                            "builtin_options_type": "UnpackOptions",
+                            "builtin_options": {
+                                "axis": )" + axis;
+
+                    if(!num.empty())
+                    {
+                        m_JsonString += R"(,
+                                "num" : )" + num;
+                    }
+
+                    m_JsonString += R"(
+                            },
+                            "custom_options_format": "FLEXBUFFERS"
+                        }
+                    ],
+                } ],
+                "buffers" : [
+                    { },
+                    { }
+                ]
+            }
+        )";
+        Setup();
+    }
+};
+
+struct DefaultUnpackAxisZeroFixture : UnpackFixture
+{
+    DefaultUnpackAxisZeroFixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxisZeroFixture)
+{
+    RunTest<2, armnn::DataType::Float32>(
+        0,
+        { {"inputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
+                            7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f,
+                            13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+                            19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f } } },
+        { {"outputTensor1", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f }},
+          {"outputTensor2", { 7.0f,  8.0f,  9.0f, 10.0f, 11.0f, 12.0f }},
+          {"outputTensor3", { 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f }},
+          {"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
+}
+
+BOOST_AUTO_TEST_SUITE_END()