IVGCVSW-1530 Add TfLite slice parser and fix transpose perm vector creation

* TfLite slice parser and relevant tests added
* TfLite transpose parser logic added to translate TF/NumPy permutation
  vector definitions to Armnn definitions (a sketch of the inversion is
  shown below)
* TfLite transpose parser 'no permute data' test modified to include the
  data for the default permutation vector used when none is specified
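
  A minimal sketch of the inversion performed in ParseTranspose (the helper
  name InvertPermutation is illustrative only and not part of this patch):

      #include <algorithm>
      #include <vector>

      // TF/NumPy semantics:  output dim i       <- input dim perm[i]
      // Arm NN semantics:    output dim perm[i] <- input dim i
      // so the Arm NN vector is the inverse permutation of the TF one,
      // e.g. 3,0,1,2 -> 1,2,3,0
      std::vector<unsigned int> InvertPermutation(const std::vector<unsigned int>& tfPerm)
      {
          std::vector<unsigned int> armnnPerm(tfPerm.size());
          for (unsigned int i = 0u; i < tfPerm.size(); ++i)
          {
              // the position of value i in the TF vector becomes element i of the Arm NN vector
              auto it = std::find(tfPerm.begin(), tfPerm.end(), i);
              armnnPerm[i] = static_cast<unsigned int>(std::distance(tfPerm.begin(), it));
          }
          return armnnPerm;
      }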

Signed-off-by: josh minor <josh.minor@arm.com>
Change-Id: Iebd30971bd180593dc6b8f0d5be1d1bc61a3a5bf
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f088a21..21d1336 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -718,6 +718,7 @@
              src/armnnTfLiteParser/test/ResizeBilinear.cpp
              src/armnnTfLiteParser/test/Softmax.cpp
              src/armnnTfLiteParser/test/SpaceToBatchND.cpp
+             src/armnnTfLiteParser/test/Slice.cpp
              src/armnnTfLiteParser/test/Split.cpp
              src/armnnTfLiteParser/test/Squeeze.cpp
              src/armnnTfLiteParser/test/StridedSlice.cpp
diff --git a/src/armnnTfLiteParser/TensorFlowLiteSupport.md b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
index 7fa299e..145ca9f 100644
--- a/src/armnnTfLiteParser/TensorFlowLiteSupport.md
+++ b/src/armnnTfLiteParser/TensorFlowLiteSupport.md
@@ -46,6 +46,8 @@
 
 * RESIZE_BILINEAR
 
+* SLICE
+
 * SOFTMAX
 
 * SPACE_TO_BATCH
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 937131c..9a20740 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -459,6 +459,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_MEAN]              = &TfLiteParser::ParseMean;
     m_ParserFunctions[tflite::BuiltinOperator_PACK]              = &TfLiteParser::ParsePack;
     m_ParserFunctions[tflite::BuiltinOperator_PAD]               = &TfLiteParser::ParsePad;
+    m_ParserFunctions[tflite::BuiltinOperator_SLICE]             = &TfLiteParser::ParseSlice;
     m_ParserFunctions[tflite::BuiltinOperator_SPLIT]             = &TfLiteParser::ParseSplit;
     m_ParserFunctions[tflite::BuiltinOperator_TANH]              = &TfLiteParser::ParseTanH;
     m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]         = &TfLiteParser::ParseTranspose;
@@ -934,17 +935,27 @@
 
     PermuteDescriptor desc;
 
-    if(inputs.size() == 2)
+    if (inputs.size() == 2)
     {
         armnn::TensorInfo permuteTensorInfo = ToTensorInfo(inputs[1]);
         BufferRawPtr permuteBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
-
-        std::vector<unsigned int> permuteShape(permuteTensorInfo.GetNumElements());
+        auto numPermVecElements = permuteTensorInfo.GetNumElements();
+        std::vector<unsigned int> permuteShape(numPermVecElements);
         ::memcpy(permuteShape.data(), permuteBufferPtr->data.data(), permuteTensorInfo.GetNumBytes());
 
-        PermutationVector permutationVector(permuteShape.data(), permuteTensorInfo.GetNumElements());
+        // permuteShape holds a TF/NumPy-style permute vector, which must be translated to the Arm NN form
+        // by finding the permutation vector that inverts it (e.g. 3,0,1,2 -> 1,2,3,0)
+        std::vector<unsigned int> armnnPermuteShape(numPermVecElements);
+        std::vector<unsigned int>::iterator it;
+        for (unsigned int i = 0u; i < numPermVecElements; ++i)
+        {
+            it = std::find(permuteShape.begin(), permuteShape.end(), i);
+            armnnPermuteShape[i] = static_cast<unsigned int>(std::distance(permuteShape.begin(), it));
+        }
 
-        desc =  PermuteDescriptor(permutationVector);
+        PermutationVector permutationVector(armnnPermuteShape.data(), permuteTensorInfo.GetNumElements());
+
+        desc = PermuteDescriptor(permutationVector);
     }
 
     layer = m_Network->AddPermuteLayer(desc, layerName.c_str());
@@ -1254,6 +1265,48 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 3);
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    SliceDescriptor desc;
+
+    // set begin tensor info for slice descriptor
+    armnn::TensorInfo beginTensorInfo = ToTensorInfo(inputs[1]);
+    BufferRawPtr beginBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+
+    std::vector<unsigned int> begin(beginTensorInfo.GetNumElements());
+    ::memcpy(begin.data(), beginBufferPtr->data.data(), beginTensorInfo.GetNumBytes());
+
+    // set size tensor info for slice descriptor
+    armnn::TensorInfo sizeTensorInfo = ToTensorInfo(inputs[2]);
+    BufferRawPtr sizeBufferPtr = GetBuffer(m_Model, inputs[2]->buffer);
+
+    std::vector<unsigned int> size(sizeTensorInfo.GetNumElements());
+    ::memcpy(size.data(), sizeBufferPtr->data.data(), sizeTensorInfo.GetNumBytes());
+    desc = SliceDescriptor(begin, size);
+
+    auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // register the input connection slots for the layer, connections are made after all layers have been created
+    // only the tensors for the inputs are relevant, exclude the const tensors
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    // register the output connection slots for the layer, connections are made after all layers have been created
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index fb01fe8..5ac6a89 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -116,6 +116,7 @@
     void ParseRelu6(size_t subgraphIndex, size_t operatorIndex);
     void ParseReshape(size_t subgraphIndex, size_t operatorIndex);
     void ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex);
+    void ParseSlice(size_t subgraphIndex, size_t operatorIndex);
     void ParseSoftmax(size_t subgraphIndex, size_t operatorIndex);
     void ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex);
     void ParseSplit(size_t subgraphIndex, size_t operatorIndex);
diff --git a/src/armnnTfLiteParser/test/Slice.cpp b/src/armnnTfLiteParser/test/Slice.cpp
new file mode 100644
index 0000000..17d1b1a
--- /dev/null
+++ b/src/armnnTfLiteParser/test/Slice.cpp
@@ -0,0 +1,176 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersFixture.hpp"
+#include "../TfLiteParser.hpp"
+
+BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+
+struct SliceFixture : public ParserFlatbuffersFixture
+{
+    explicit SliceFixture(const std::string & inputShape,
+                          const std::string & outputShape,
+                          const std::string & beginData,
+                          const std::string & sizeData)
+    {
+        m_JsonString = R"(
+            {
+                  "version": 3,
+                  "operator_codes": [
+                    {
+                      "builtin_code": "SLICE",
+                      "version": 1
+                    }
+                  ],
+                  "subgraphs": [
+                    {
+                      "tensors": [
+                        {
+                          "shape": )" + inputShape + R"(,
+                          "type": "FLOAT32",
+                          "buffer": 0,
+                          "name": "inputTensor",
+                          "quantization": {
+                            "min": [
+                              0.0
+                            ],
+                            "max": [
+                              255.0
+                            ],
+                            "details_type": 0,
+                            "quantized_dimension": 0
+                          },
+                          "is_variable": false
+                        },
+                        {
+                          "shape": )" + outputShape + R"(,
+                          "type": "FLOAT32",
+                          "buffer": 1,
+                          "name": "outputTensor",
+                          "quantization": {
+                            "details_type": 0,
+                            "quantized_dimension": 0
+                          },
+                          "is_variable": false
+                        })";
+        m_JsonString += R"(,
+                            {
+                            "shape": [
+                                3
+                            ],
+                            "type": "INT32",
+                            "buffer": 2,
+                            "name": "beginTensor",
+                            "quantization": {
+                            }
+                            })";
+        m_JsonString += R"(,
+                            {
+                            "shape": [
+                                3
+                            ],
+                            "type": "INT32",
+                            "buffer": 3,
+                            "name": "sizeTensor",
+                            "quantization": {
+                            }
+                            })";
+        m_JsonString += R"(],
+                      "inputs": [
+                        0
+                      ],
+                      "outputs": [
+                        1
+                      ],
+                      "operators": [
+                        {
+                          "opcode_index": 0,
+                          "inputs": [
+                            0,
+                            2,
+                            3)";
+        m_JsonString += R"(],
+                          "outputs": [
+                            1
+                          ],
+                          mutating_variable_inputs: [
+                          ]
+                        }
+                      ]
+                    }
+                  ],
+                  "description": "TOCO Converted.",
+                  "buffers": [
+                    { },
+                    { })";
+        m_JsonString += R"(,{"data": )" + beginData + R"( })";
+        m_JsonString += R"(,{"data": )" + sizeData + R"( })";
+        m_JsonString += R"(
+                  ]
+                }
+        )";
+        SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+    }
+};
+
+struct SliceFixtureSingleDim : SliceFixture
+{
+    SliceFixtureSingleDim() : SliceFixture("[ 3, 2, 3 ]",
+                                           "[ 1, 1, 3 ]",
+                                           "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                           "[ 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SliceSingleDim, SliceFixtureSingleDim)
+{
+    RunTest<3, armnn::DataType::Float32>(
+      0,
+      {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+      {{"outputTensor", { 3, 3, 3 }}});
+
+    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+                == armnn::TensorShape({1,1,3})));
+}
+
+struct SliceFixtureD123 : SliceFixture
+{
+    SliceFixtureD123() : SliceFixture("[ 3, 2, 3 ]",
+                                      "[ 1, 2, 3 ]",
+                                      "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                      "[ 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SliceD123, SliceFixtureD123)
+{
+    RunTest<3, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+        {{"outputTensor", { 3, 3, 3, 4, 4, 4 }}});
+
+    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+                == armnn::TensorShape({1,2,3})));
+}
+
+struct SliceFixtureD213 : SliceFixture
+{
+    SliceFixtureD213() : SliceFixture("[ 3, 2, 3 ]",
+                                      "[ 2, 1, 3 ]",
+                                      "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                      "[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(SliceD213, SliceFixtureD213)
+{
+    RunTest<3, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+        {{"outputTensor", { 3, 3, 3, 5, 5, 5 }}});
+
+    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+                == armnn::TensorShape({2,1,3})));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Transpose.cpp b/src/armnnTfLiteParser/test/Transpose.cpp
index 2e3190b..b2f953e 100644
--- a/src/armnnTfLiteParser/test/Transpose.cpp
+++ b/src/armnnTfLiteParser/test/Transpose.cpp
@@ -55,24 +55,20 @@
                           },
                           "is_variable": false
                         })";
-        if (!permuteData.empty())
-        {
-            m_JsonString += R"(,
-                              {
-                                "shape": [
-                                  3
-                                ],
-                                "type": "INT32",
-                                "buffer": 2,
-                                "name": "permuteTensor",
-                                "quantization": {
-                                  "details_type": 0,
-                                  "quantized_dimension": 0
-                                },
-                                "is_variable": false
-                              })";
-        }
-
+        m_JsonString += R"(,
+                          {
+                            "shape": [
+                              3
+                            ],
+                            "type": "INT32",
+                            "buffer": 2,
+                            "name": "permuteTensor",
+                            "quantization": {
+                              "details_type": 0,
+                              "quantized_dimension": 0
+                            },
+                            "is_variable": false
+                          })";
         m_JsonString += R"(],
                       "inputs": [
                         0
@@ -85,10 +81,7 @@
                           "opcode_index": 0,
                           "inputs": [
                             0)";
-        if (!permuteData.empty())
-        {
-            m_JsonString += R"(,2)";
-        }
+        m_JsonString += R"(,2)";
         m_JsonString += R"(],
                           "outputs": [
                             1
@@ -117,6 +110,7 @@
     }
 };
 
+// Note that this assumes the TensorFlow permutation vector convention as opposed to the Arm NN implementation.
 struct TransposeFixtureWithPermuteData : TransposeFixture
 {
     TransposeFixtureWithPermuteData() : TransposeFixture("[ 2, 2, 3 ]",
@@ -128,29 +122,32 @@
 {
     RunTest<3, armnn::DataType::Float32>(
       0,
-      {{"inputTensor", {  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
-      {{"outputTensor", {  1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12 }}});
+      {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
+      {{"outputTensor", { 1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12 }}});
 
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,3,2})));
 }
 
+// When no permute argument is given, TensorFlow's default behaviour is to reverse the dimensions,
+// i.e. to use the permutation vector [n-1 ... 0], where n is the number of input dimensions.
+// Here that default vector is 2,1,0, so the expected output shape is 3,2,2.
 struct TransposeFixtureWithoutPermuteData : TransposeFixture
 {
     TransposeFixtureWithoutPermuteData() : TransposeFixture("[ 2, 2, 3 ]",
-                                                            "",
-                                                            "[ 2, 3, 2 ]") {}
+                                                            "[ 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 ]",
+                                                            "[ 3, 2, 2 ]") {}
 };
 
 BOOST_FIXTURE_TEST_CASE(TransposeWithoutPermuteDims, TransposeFixtureWithoutPermuteData)
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
-        {{"inputTensor", {  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
-        {{"outputTensor", {  1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}});
+        {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
+        {{"outputTensor", { 1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12 }}});
 
     BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
-                == armnn::TensorShape({2,3,2})));
+                == armnn::TensorShape({3,2,2})));
 }
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file