IVGCVSW-1650 Add Support for Reshape layer on TF Lite parser
 * Added Reshape operator support for the TfLite Parser.
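 * The output shape is taken from ReshapeOptions::new_shape; at most one entry may
   be -1, and that dimension is inferred from the input tensor's element count
   (a standalone sketch of this arithmetic follows the diff).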

Change-Id: I64a5650dac089905a402be4a9cb6032aa0d81f00
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index dd1f577..13e4604 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -24,6 +24,7 @@
 #include <fstream>
 #include <algorithm>
 #include <limits>
+#include <numeric>
 
 using namespace armnn;
 using armnn::CheckLocation;
@@ -457,6 +458,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]           =  &TfLiteParser::ParseSqueeze;
     m_ParserFunctions[tflite::BuiltinOperator_RELU]              =  &TfLiteParser::ParseRelu;
     m_ParserFunctions[tflite::BuiltinOperator_RELU6]             =  &TfLiteParser::ParseRelu6;
+    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]           =  &TfLiteParser::ParseReshape;
 }
 
 void TfLiteParser::ResetParser()
@@ -1033,6 +1035,68 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
+armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
+                                                     const std::vector<int32_t> & targetDimsIn)
+{
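+    // A -1 entry in the target shape marks the single dimension whose size is to be
+    // inferred from the total number of elements in the input tensor.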
+    std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
+    const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
+
+    if (stretchDim != targetDimsIn.end())
+    {
+        if (std::find(std::next(stretchDim), targetDimsIn.end(), -1) != targetDimsIn.end())
+        {
+            throw ParseException(
+                boost::str(
+                    boost::format("At most one component of shape can be -1 %1%") % CHECK_LOCATION().AsString()));
+        }
+
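+        // Multiplying every target entry (including the single -1) by an initial -1
+        // yields the product of the explicitly sized dimensions.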
+        auto targetNumElements =
+            boost::numeric_cast<unsigned int>(
+                std::accumulate(targetDimsIn.begin(), targetDimsIn.end(), -1, std::multiplies<int32_t>()));
+
+        auto stretchIndex = static_cast<size_t>(std::distance(targetDimsIn.begin(), stretchDim));
+        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+    }
+
+    TensorShape outputShape = TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
+
+    TensorInfo reshapeInfo = inputTensorInfo;
+    reshapeInfo.SetShape(outputShape);
+
+    return reshapeInfo;
+}
+
+void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
+    const auto * options = operatorPtr->builtin_options.AsReshapeOptions();
+
+    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo outputTensorInfo =
+        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, options->new_shape);
+
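+    // The computed output shape becomes the target shape of the reshape descriptor.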
+    ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
+
+    auto layerName = boost::str(boost::format("Reshape:%1%:%2%") % subgraphIndex % operatorIndex);
+    IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
+}
+
 armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
                                                                 unsigned int outputSlot,
                                                                 tflite::ActivationFunctionType activationType)
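
Below is a minimal, standalone sketch of the stretch-dimension arithmetic that
OutputShapeOfReshape above performs. The function name ComputeReshapeOutputDims is
illustrative only, a plain assert stands in for the ParseException the parser throws
on duplicate -1 entries, and a std::vector of dimensions stands in for armnn::TensorShape.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <iterator>
#include <numeric>
#include <vector>

// Sketch only: not an Arm NN API.
std::vector<unsigned int> ComputeReshapeOutputDims(unsigned int inputNumElements,
                                                   const std::vector<std::int32_t>& targetDims)
{
    std::vector<unsigned int> outputDims(targetDims.begin(), targetDims.end());
    const auto stretchDim = std::find(targetDims.begin(), targetDims.end(), -1);

    if (stretchDim != targetDims.end())
    {
        // The parser rejects more than one -1 with a ParseException; assert stands in here.
        assert(std::find(std::next(stretchDim), targetDims.end(), -1) == targetDims.end());

        // Multiplying every entry (including the single -1) by an initial -1 yields
        // the product of the explicitly sized dimensions.
        const auto explicitElements = static_cast<unsigned int>(
            std::accumulate(targetDims.begin(), targetDims.end(), -1, std::multiplies<std::int32_t>()));

        const auto stretchIndex = static_cast<std::size_t>(std::distance(targetDims.begin(), stretchDim));
        outputDims[stretchIndex] = inputNumElements / explicitElements;
    }

    return outputDims;
}

int main()
{
    // A 24-element input (for example 2x3x4) reshaped to {-1, 4} yields {6, 4}.
    for (unsigned int dim : ComputeReshapeOutputDims(24u, {-1, 4}))
    {
        std::cout << dim << ' ';
    }
    std::cout << '\n';
    return 0;
}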