IVGCVSW-4932 Introduce ShapeInferenceMethod to TfLite Parser

* Introduced ShapeInferenceMethod::InferAndValidate to TfLiteParser
* Added unit tests
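
A minimal usage sketch (illustrative only; the model path is a placeholder):

    armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions options;
    options.m_InferAndValidate = true; // opt in to ShapeInferenceMethod::InferAndValidate

    armnnTfLiteParser::ITfLiteParser* parser = armnnTfLiteParser::ITfLiteParser::CreateRaw(
        armnn::Optional<armnnTfLiteParser::ITfLiteParser::TfLiteParserOptions>(options));

    // Output tensors with an empty shape in the model are treated as dynamic;
    // their shapes are inferred and validated while the network is created.
    // (CreateRaw returns a raw pointer; release it with ITfLiteParser::Destroy.)
    armnn::INetworkPtr network = parser->CreateNetworkFromBinaryFile("model.tflite");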

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iad4aadce92912e7645b1652019ec4af478b7fc32
diff --git a/include/armnnTfLiteParser/ITfLiteParser.hpp b/include/armnnTfLiteParser/ITfLiteParser.hpp
index de1eae7..a68b719 100644
--- a/include/armnnTfLiteParser/ITfLiteParser.hpp
+++ b/include/armnnTfLiteParser/ITfLiteParser.hpp
@@ -28,9 +28,11 @@
     struct TfLiteParserOptions
     {
         TfLiteParserOptions()
-            : m_StandInLayerForUnsupported(false) {}
+            : m_StandInLayerForUnsupported(false),
+              m_InferAndValidate(false) {}
 
         bool m_StandInLayerForUnsupported;
+        bool m_InferAndValidate;
     };
 
     static ITfLiteParser* CreateRaw(const armnn::Optional<TfLiteParserOptions>& options = armnn::EmptyOptional());
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index 286d9f3..4536035 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -74,20 +74,26 @@
 template <typename T, std::size_t n>
 boost::test_tools::predicate_result CompareTensors(const boost::multi_array<T, n>& a,
                                                    const boost::multi_array<T, n>& b,
-                                                   bool compareBoolean = false)
+                                                   bool compareBoolean = false,
+                                                   bool isDynamic = false)
 {
-    // Checks they are same shape.
-    for (unsigned int i=0; i<n; i++)
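+    // For a dynamic tensor the expected shape is not known ahead of time, so only element values are compared.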
+    if (!isDynamic)
     {
-        if (a.shape()[i] != b.shape()[i])
+        // Checks they are same shape.
+        for (unsigned int i = 0;
+             i < n;
+             i++)
         {
-            boost::test_tools::predicate_result res(false);
-            res.message() << "Different shapes ["
-                        << a.shape()[i]
-                        << "!="
-                        << b.shape()[i]
-                        << "]";
-            return res;
+            if (a.shape()[i] != b.shape()[i])
+            {
+                boost::test_tools::predicate_result res(false);
+                res.message() << "Different shapes ["
+                              << a.shape()[i]
+                              << "!="
+                              << b.shape()[i]
+                              << "]";
+                return res;
+            }
         }
     }
 
@@ -190,9 +196,13 @@
 
 // Creates a boost::multi_array with the shape defined by the given TensorInfo and contents defined by the given vector.
 template <typename T, std::size_t n>
-boost::multi_array<T, n> MakeTensor(const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat)
+boost::multi_array<T, n> MakeTensor(
+    const armnn::TensorInfo& tensorInfo, const std::vector<T>& flat, bool isDynamic = false)
 {
-    ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
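+    // A dynamic tensor's element count is not known up front, so the size check only applies to static tensors.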
+    if (!isDynamic)
+    {
+        ARMNN_ASSERT_MSG(flat.size() == tensorInfo.GetNumElements(), "Wrong number of components supplied to tensor");
+    }
 
     std::array<unsigned int, n> shape;
 
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 1a44493..7b49617 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -5,6 +5,7 @@
 
 #include "TfLiteParser.hpp"
 
+#include <armnn/BackendOptions.hpp>
 #include <armnn/Descriptors.hpp>
 #include <armnn/Exceptions.hpp>
 #include <armnn/Logging.hpp>
@@ -311,8 +312,10 @@
     }
 }
 
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr, const std::vector<unsigned int>& shapes,
-                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
+                               const std::vector<unsigned int>& shapes,
+                               const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3},
+                               const bool outputTensor = false)
 {
     armnn::DataType type;
     CHECK_TENSOR_PTR(tensorPtr);
@@ -357,9 +360,14 @@
         }
     }
     std::vector<unsigned int> safeShape = shapes;
+    bool isDynamic = false;
     if (safeShape.size() == 0)
     {
         safeShape.push_back(1);
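+        // An empty shape on an output tensor denotes a dynamic tensor rather than a scalar.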
+        if (outputTensor)
+        {
+            isDynamic = true;
+        }
     }
 
     float quantizationScale = 0.0f;
@@ -383,12 +391,16 @@
                 quantizationOffset = boost::numeric_cast<int32_t>(tensorPtr->quantization->zero_point[0]);
             }
 
-            armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
-                              safeShape.data(),
-                              type,
-                              quantizationScale,
-                              quantizationOffset);
-
+            TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+                                    safeShape.data());
+            if (isDynamic)
+            {
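+                // A 1D TensorShape with an unspecified dimension marks the tensor as dynamic.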
+                tensorShape = TensorShape(1, false);
+            }
+            armnn::TensorInfo result(tensorShape,
+                                     type,
+                                     quantizationScale,
+                                     quantizationOffset);
             return result;
         }
         else
@@ -402,19 +414,29 @@
                       std::back_inserter(quantizationScales));
 
             // QSymmS8 Per-axis
-            armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
-                              safeShape.data(),
-                              type,
-                              quantizationScales,
-                              dimensionMappings[boost::numeric_cast<unsigned int>(
-                              tensorPtr->quantization->quantized_dimension)]);
+            TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+                                    safeShape.data());
+            if (isDynamic)
+            {
+                tensorShape = TensorShape(1, false);
+            }
+            armnn::TensorInfo result(tensorShape,
+                                     type,
+                                     quantizationScales,
+                                     dimensionMappings[boost::numeric_cast<unsigned int>(
+                                         tensorPtr->quantization->quantized_dimension)]);
             return result;
         }
     }
     else
     {
-        armnn::TensorInfo result(boost::numeric_cast<unsigned int>(safeShape.size()),
-                                 safeShape.data(),
+        TensorShape tensorShape(boost::numeric_cast<unsigned int>(safeShape.size()),
+                                safeShape.data());
+        if (isDynamic)
+        {
+            tensorShape = TensorShape(1, false);
+        }
+        armnn::TensorInfo result(tensorShape,
                                  type,
                                  quantizationScale,
                                  quantizationOffset);
@@ -429,6 +451,14 @@
     return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
 }
 
+armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
+                               const bool outputTensor)
+{
+    auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
+    const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3};
+    return ToTensorInfo(tensorPtr, dimensions, dimensionMappings, outputTensor);
+}
+
 template<typename T>
 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
 CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
@@ -659,7 +689,20 @@
 
 INetworkPtr TfLiteParser::CreateNetworkFromModel()
 {
-    m_Network = INetwork::Create();
+
+    using NetworkOptions = std::vector<BackendOptions>;
+    NetworkOptions networkOptions = {};
+    if (m_Options && m_Options.value().m_InferAndValidate)
+    {
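+        // Request ShapeInferenceMethod::InferAndValidate via a "ShapeInferenceMethod" BackendOptions entry.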
+        BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
+                                                  {
+                                                      { "InferAndValidate", true }
+                                                  });
+
+        networkOptions.push_back(shapeInferenceMethodOption);
+    }
+
+    m_Network = INetwork::Create(networkOptions);
     ARMNN_ASSERT(m_Model.get() != nullptr);
 
     if (m_Model->subgraphs.size() != 1)
@@ -734,6 +777,26 @@
         }
     }
 
+    // If InferAndValidate is set, make sure all TensorInfos are set and all dynamic output tensors have been inferred
+    if (m_Options && m_Options.value().m_InferAndValidate)
+    {
+        for (subgraphIndex = 0;
+             subgraphIndex < m_SubgraphConnections.size();
+             ++subgraphIndex)
+        {
+            if (m_SubgraphConnections[subgraphIndex].size() > 0)
+            {
+                // get the last output slot on the layer
+                auto outputSlot =
+                    m_SubgraphConnections[subgraphIndex][m_SubgraphConnections[subgraphIndex].size() - 1].outputSlot;
+                if (outputSlot != nullptr)
+                {
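+                    // IsTensorInfoSet() triggers inference and validation of any tensor info that is not yet set.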
+                    outputSlot->IsTensorInfoSet();
+                }
+            }
+        }
+    }
+
     return std::move(m_Network);
 }
 
@@ -835,7 +898,7 @@
 
     for (unsigned int i = 0u; i < numOutputs; ++i)
     {
-        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i]));
+        layer->GetOutputSlot(i).SetTensorInfo(ToTensorInfo(outputs[i], true));
     }
 
     auto inputTensorIds  = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -914,7 +977,7 @@
 
     ARMNN_ASSERT(layer != nullptr);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // register the input connection slots for the layer, connections are made after all layers have been created
@@ -1001,7 +1064,7 @@
     }
     ARMNN_ASSERT(layer != nullptr);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // register the input connection slots for the layer, connections are made after all layers have been created
@@ -1030,7 +1093,7 @@
     IConnectableLayer* layer = m_Network->AddDequantizeLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1057,7 +1120,7 @@
     IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1093,7 +1156,7 @@
     }
 
     TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddTransposeLayer(desc, layerName.c_str());
@@ -1188,7 +1251,7 @@
 
     ARMNN_ASSERT(layer != nullptr);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // only the tensors for the inputs are relevant, exclude the const (filter) tensor
@@ -1241,7 +1304,7 @@
     auto layerName = boost::str(boost::format("BatchToSpaceND:%1%:%2%") % subgraphIndex % operatorIndex);
 
     TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddBatchToSpaceNdLayer(desc, layerName.c_str());
@@ -1272,7 +1335,7 @@
 
     ARMNN_ASSERT(layer != nullptr);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1303,7 +1366,7 @@
     TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
     CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddMaximumLayer(layerName.c_str());
@@ -1340,7 +1403,7 @@
     TensorInfo input1TensorInfo = ToTensorInfo(inputs[1]);
     CheckMatchingQuantization(inputTensorInfo, input1TensorInfo, layerName, "Input 0", "Input 1");
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddMinimumLayer(layerName.c_str());
@@ -1415,7 +1478,7 @@
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddPooling2dLayer(desc, layerName.c_str());
@@ -1462,7 +1525,7 @@
     auto layerName = boost::str(boost::format("Slice:%1%:%2%") % subgraphIndex % operatorIndex);
 
     TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* const layer = m_Network->AddSliceLayer(desc, layerName.c_str());
@@ -1495,7 +1558,7 @@
     auto layerName = boost::str(boost::format("Softmax:%1%:%2%") % subgraphIndex % operatorIndex);
     IConnectableLayer* const layer = m_Network->AddSoftmaxLayer(desc, layerName.c_str());
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // register the input connection slots for the layer, connections are made after all layers have been created
@@ -1545,7 +1608,7 @@
     auto layerName = boost::str(boost::format("SpaceToBatchND:%1%:%2%") % subgraphIndex % operatorIndex);
 
     TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddSpaceToBatchNdLayer(desc, layerName.c_str());
@@ -1692,7 +1755,7 @@
     IConnectableLayer* layer = m_Network->AddStridedSliceLayer(desc, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1722,7 +1785,7 @@
     IConnectableLayer* layer = m_Network->AddSubtractionLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1761,7 +1824,7 @@
     IConnectableLayer* layer = m_Network->AddDivisionLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1800,7 +1863,7 @@
     IConnectableLayer* layer = m_Network->AddAdditionLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1839,7 +1902,7 @@
     IConnectableLayer* layer = m_Network->AddMultiplicationLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1876,7 +1939,7 @@
     desc.m_Axis = axis;
 
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
 
     desc.m_KeepDims =
         inputTensorInfo.GetNumDimensions() == outputTensorInfo.GetNumDimensions() ?
@@ -1910,7 +1973,7 @@
   IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
   ARMNN_ASSERT(layer != nullptr);
 
-  TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+  TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
   layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
   auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -1943,7 +2006,7 @@
     }
 
     auto layerName = boost::str(boost::format("Pad:%1%:%2%") % subgraphIndex % operatorIndex);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
 
     IConnectableLayer* layer = m_Network->AddPadLayer(desc, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
@@ -1971,7 +2034,7 @@
     IConnectableLayer* layer = m_Network->AddQuantizeLayer(layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -2073,7 +2136,7 @@
 
     IConnectableLayer* const layer = m_Network->AddActivationLayer(activationDesc, layerName.c_str());
 
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // register the input connection slots for the layer, connections are made after all layers have been created
@@ -2279,7 +2342,7 @@
     }
 
     TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
     IConnectableLayer* layer = m_Network->AddResizeLayer(desc, layerName.c_str());
@@ -2327,7 +2390,7 @@
     }
 
     auto layerName = boost::str(boost::format("Concatenation:%1%:%2%") % subgraphIndex % operatorIndex);
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
 
     IConnectableLayer* layer = m_Network->AddConcatLayer(concatDescriptor, layerName.c_str());
     ARMNN_ASSERT(layer != nullptr);
@@ -2446,7 +2509,7 @@
         RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
     }
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     // we need to add the activation layer and fortunately we don't need to care about the data layout
@@ -2566,7 +2629,7 @@
 
     ARMNN_ASSERT(layer != nullptr);
 
-    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
     layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
@@ -2660,7 +2723,7 @@
     // Create reshape to remove the unpacked dimension for unpack operator of each output from Splitter.
     for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
     {
-        armnn::TensorInfo outputTensorInfo  = ToTensorInfo(outputs[k]);
+        armnn::TensorInfo outputTensorInfo  = ToTensorInfo(outputs[k], true);
         std::string reshapeLayerName = boost::str(boost::format("Reshape_for:%1%") % layer->GetName());
         armnn::ReshapeDescriptor desc;
         desc.m_TargetShape = outputTensorInfo.GetShape();
@@ -2757,7 +2820,7 @@
 
     for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
     {
-        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
+        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
         layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
     }
 
@@ -2905,7 +2968,7 @@
 
     for (unsigned int k = 0; k < layer->GetNumOutputSlots(); ++k)
     {
-        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k]);
+        armnn::TensorInfo tensorInfo = ToTensorInfo(outputs[k], true);
         layer->GetOutputSlot(k).SetTensorInfo(tensorInfo);
     }
 
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index 2eae5f5..8ef827c 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -234,6 +234,32 @@
         });
 }
 
+struct DynamicConv2DWithBiasesFixture : Conv2DWithBiasesFixture
+{
+    DynamicConv2DWithBiasesFixture()
+        : Conv2DWithBiasesFixture("[ 1, 2, 2, 1 ]",    // inputShape
+                                  "[ ]",              // outputShape
+                                  "[ 1, 2, 2, 1 ]",    // filterShape
+                                  "[ 2,1, 0,6 ]",      // filterData
+                                  "[ 1 ]",             // biasShape
+                                  "[ 10, 0, 0, 0 ]",   // biasData
+                                  "1")                 // stride w and h
+    {}
+};
+
+BOOST_FIXTURE_TEST_CASE( ParseDynamicConv2DWithBias, DynamicConv2DWithBiasesFixture )
+{
+    RunTest<4,
+        armnn::DataType::QAsymmU8,
+        armnn::DataType::QAsymmU8>(0,
+                                   { { "inputTensor", { 1, 2, 3, 4, } } },
+                                   { { "outputTensor", {   (1*2 + 2*1 + 3*0 + 4*6 + 10)/2,
+                                                           (2*2 + 0*1 + 4*0 + 0*6 + 10)/2,
+                                                           (3*2 + 4*1 + 0*0 + 0*6 + 10)/2,
+                                                           (4*2 + 0*1 + 0*0 + 0*6 + 10)/2} } },
+                                   true);
+}
+
 struct Conv2DShapeTestFixture : Conv2DWithBiasesFixture
 {
     static std::string GenerateInts(unsigned int n)
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index 2bf08fa..5d220eb 100644
--- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -196,4 +196,30 @@
           (110+10)/2, (197+10)/2, (158+10)/2 });
 }
 
+struct DynamicDepthwiseConvolution2dSameBiasFixture : DepthwiseConvolution2dFixture
+{
+    DynamicDepthwiseConvolution2dSameBiasFixture()
+        : DepthwiseConvolution2dFixture("[ 1, 3, 3, 1 ]",           // inputShape
+                                        "[ ]",           // outputShape
+                                        "[ 1, 3, 3, 1 ]",           // filterShape
+                                        "[ 9,8,7, 6,5,4, 3,2,1 ]",  // filterData
+                                        "1",                        // stride w and h
+                                        "SAME",                     // padding type
+                                        "[ 1 ]",                    // biasShape
+                                        "[ 10, 0, 0, 0 ]")          // biasData
+    {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDynamicDepthwiseConv2DSameBias, DynamicDepthwiseConvolution2dSameBiasFixture)
+{
+    RunTest<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(0,
+                                                      { { "inputTensor", { 0, 1, 2,
+                                                                            3, 4, 5,
+                                                                            6, 7, 8 } } },
+                                                      { { "outputTensor", { ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
+                                                                            ( 57+10)/2, (120+10)/2, (111+10)/2,
+                                                                            (110+10)/2, (197+10)/2, (158+10)/2  } } },
+                                                      true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Div.cpp b/src/armnnTfLiteParser/test/Div.cpp
index f83e455..10be29d 100644
--- a/src/armnnTfLiteParser/test/Div.cpp
+++ b/src/armnnTfLiteParser/test/Div.cpp
@@ -112,4 +112,30 @@
                                                           1.0f,  1.0f, -1.0f } } });
 }
 
+struct DynamicDivFixture : public DivFixture
+{
+    DynamicDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(ParseDynamicDiv, DynamicDivFixture)
+{
+    using armnn::DataType;
+    float Inf = std::numeric_limits<float>::infinity();
+    float NaN = std::numeric_limits<float>::quiet_NaN();
+
+    RunTest<4, DataType::Float32, DataType::Float32>(
+        0,
+        {{ "inputTensor1", { 0.0f,  1.0f,   2.0f,
+                             3.0f,  4.0f,   5.0f,
+                             6.0f,  7.0f,   8.0f,
+                             9.0f, 10.0f, -11.0f } },
+         { "inputTensor2", { 0.0f,  0.0f,   4.0f,
+                             3.0f, 40.0f,   5.0f,
+                             6.0f,  7.0f,   8.0f,
+                             9.0f, 10.0f,  11.0f } } },
+        {{ "outputTensor", { NaN,  Inf,   0.5f,
+                             1.0f, 0.1f,  1.0f,
+                             1.0f, 1.0f,  1.0f,
+                             1.0f, 1.0f, -1.0f } } },
+        true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index d1223d5..e7aa908 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -171,4 +171,28 @@
             { (40+10)/2, (400+10)/2 });
 }
 
+struct DynamicFullyConnectedWithBiasMultipleOutputsFixture : FullyConnectedFixture
+{
+    DynamicFullyConnectedWithBiasMultipleOutputsFixture()
+        : FullyConnectedFixture("[ 1, 4, 2, 1 ]",     // inputShape
+                                "[ ]",               // outputShape
+                                "[ 1, 4 ]",           // filterShape
+                                "[ 2, 3, 4, 5 ]",     // filterData
+                                "[ 1 ]",              // biasShape
+                                "[ 10, 0, 0, 0 ]" )   // biasData
+    { }
+};
+
+BOOST_FIXTURE_TEST_CASE(
+    DynamicFullyConnectedWithBiasMultipleOutputs,
+    DynamicFullyConnectedWithBiasMultipleOutputsFixture)
+{
+    RunTest<2,
+            armnn::DataType::QAsymmU8,
+            armnn::DataType::QAsymmU8>(0,
+                                      { { "inputTensor", { 1, 2, 3, 4, 10, 20, 30, 40} } },
+                                      { { "outputTensor", { (40+10)/2, (400+10)/2 } } },
+                                      true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 891e0be..f2f723b 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -42,6 +42,7 @@
     {
         ITfLiteParser::TfLiteParserOptions options;
         options.m_StandInLayerForUnsupported = true;
+        options.m_InferAndValidate = true;
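+        // Run every parser test with shape inference and validation enabled.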
 
         m_Parser.reset(ITfLiteParser::CreateRaw(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)));
     }
@@ -149,7 +150,8 @@
               armnn::DataType ArmnnType2>
     void RunTest(size_t subgraphId,
                  const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType1>>>& inputData,
-                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData);
+                 const std::map<std::string, std::vector<armnn::ResolveType<ArmnnType2>>>& expectedOutputData,
+                 bool isDynamic = false);
 
 
     /// Multiple Inputs, Multiple Outputs w/ Variable Datatypes and different dimension sizes.
@@ -248,7 +250,8 @@
           armnn::DataType armnnType2>
 void ParserFlatbuffersFixture::RunTest(size_t subgraphId,
     const std::map<std::string, std::vector<armnn::ResolveType<armnnType1>>>& inputData,
-    const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData)
+    const std::map<std::string, std::vector<armnn::ResolveType<armnnType2>>>& expectedOutputData,
+    bool isDynamic)
 {
     using DataType2 = armnn::ResolveType<armnnType2>;
 
@@ -289,8 +292,8 @@
     for (auto&& it : expectedOutputData)
     {
         armnn::BindingPointInfo bindingInfo = m_Parser->GetNetworkOutputBindingInfo(subgraphId, it.first);
-        auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second);
-        BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first]));
+        auto outputExpected = MakeTensor<DataType2, NumOutputDimensions>(bindingInfo.second, it.second, isDynamic);
+        BOOST_TEST(CompareTensors(outputExpected, outputStorage[it.first], false, isDynamic));
     }
 }
 
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index 6ed568c..025612f 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -138,4 +138,22 @@
                 == armnn::TensorShape({2,3,3})));
 }
 
+struct DynamicReshapeFixtureWithReshapeDimsFlattenOneDim : ReshapeFixture
+{
+    DynamicReshapeFixtureWithReshapeDimsFlattenOneDim() : ReshapeFixture("[ 2, 9 ]",
+                                                                         "[ ]",
+                                                                         "[ 2, -1, 3 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(DynParseReshapeWithReshapeDimsFlattenOneDim, DynamicReshapeFixtureWithReshapeDimsFlattenOneDim)
+{
+    SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+    RunTest<3,
+        armnn::DataType::QAsymmU8,
+        armnn::DataType::QAsymmU8>(0,
+                                   { { "inputTensor", {  1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 } } },
+                                   { { "outputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 } } },
+                                   true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/Slice.cpp b/src/armnnTfLiteParser/test/Slice.cpp
index 17d1b1a..b94a983 100644
--- a/src/armnnTfLiteParser/test/Slice.cpp
+++ b/src/armnnTfLiteParser/test/Slice.cpp
@@ -173,4 +173,21 @@
                 == armnn::TensorShape({2,1,3})));
 }
 
+struct DynamicSliceFixtureD213 : SliceFixture
+{
+    DynamicSliceFixtureD213() : SliceFixture("[ 3, 2, 3 ]",
+                                             "[ ]",
+                                             "[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]",
+                                             "[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(DynamicSliceD213, DynamicSliceFixtureD213)
+{
+    RunTest<3, armnn::DataType::Float32, armnn::DataType::Float32>(
+        0,
+        {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
+        {{"outputTensor", { 3, 3, 3, 5, 5, 5 }}},
+        true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Sub.cpp b/src/armnnTfLiteParser/test/Sub.cpp
index 6a251a5..2854d81 100644
--- a/src/armnnTfLiteParser/test/Sub.cpp
+++ b/src/armnnTfLiteParser/test/Sub.cpp
@@ -104,4 +104,21 @@
       {{"outputTensor", { 1, 3, 5, 7 }}});
 }
 
+struct DynamicSubFixture : SubFixture
+{
+    DynamicSubFixture() : SubFixture("[ 1, 4 ]",
+                                     "[ 1, 4 ]",
+                                     "[  ]") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(DynamicSub, DynamicSubFixture)
+{
+    RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
+        0,
+        {{"inputTensor1", { 4, 5, 6, 7 }},
+         {"inputTensor2", { 3, 2, 1, 0 }}},
+        {{"outputTensor", { 1, 3, 5, 7 }}},
+        true);
+}
+
 BOOST_AUTO_TEST_SUITE_END()