IVGCVSW-5592 Implement Pimpl Idiom for Tf and TfLite Parsers


Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I4a82aca4a2c47b3c598b91bc0075c09397be728a
diff --git a/include/armnnTfLiteParser/ITfLiteParser.hpp b/include/armnnTfLiteParser/ITfLiteParser.hpp
index a68b719..b286c1e 100644
--- a/include/armnnTfLiteParser/ITfLiteParser.hpp
+++ b/include/armnnTfLiteParser/ITfLiteParser.hpp
@@ -19,6 +19,7 @@
 
 using BindingPointInfo = armnn::BindingPointInfo;
 
+class TfLiteParserImpl;
 class ITfLiteParser;
 using ITfLiteParserPtr = std::unique_ptr<ITfLiteParser, void(*)(ITfLiteParser* parser)>;
 
@@ -40,32 +41,35 @@
     static void Destroy(ITfLiteParser* parser);
 
     /// Create the network from a flatbuffers binary file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) = 0;
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile);
 
     /// Create the network from a flatbuffers binary
-    virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent) = 0;
+    armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent);
 
     /// Retrieve binding info (layer id and tensor info) for the network input identified by
     /// the given layer name and subgraph id
-    virtual BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId,
-                                                        const std::string& name) const = 0;
+    BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId,
+                                                const std::string& name) const;
 
     /// Retrieve binding info (layer id and tensor info) for the network output identified by
     /// the given layer name and subgraph id
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId,
-                                                         const std::string& name) const = 0;
+    BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId,
+                                                 const std::string& name) const;
 
     /// Return the number of subgraphs in the parsed model
-    virtual size_t GetSubgraphCount() const = 0;
+    size_t GetSubgraphCount() const;
 
     /// Return the input tensor names for a given subgraph
-    virtual std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId) const = 0;
+    std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId) const;
 
     /// Return the output tensor names for a given subgraph
-    virtual std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const = 0;
+    std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const;
 
-protected:
-    virtual ~ITfLiteParser() {};
+private:
+    ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options = armnn::EmptyOptional());
+    ~ITfLiteParser();
+
+    std::unique_ptr<TfLiteParserImpl> pTfLiteParserImpl;
 };
 
 }
diff --git a/include/armnnTfParser/ITfParser.hpp b/include/armnnTfParser/ITfParser.hpp
index b0ffc0d..91e4cb3 100644
--- a/include/armnnTfParser/ITfParser.hpp
+++ b/include/armnnTfParser/ITfParser.hpp
@@ -30,31 +30,48 @@
     static void Destroy(ITfParser* parser);
 
     /// Create the network from a protobuf text file on the disk.
-    virtual armnn::INetworkPtr CreateNetworkFromTextFile(
+    armnn::INetworkPtr CreateNetworkFromTextFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) = 0;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Create the network from a protobuf binary file on the disk.
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) = 0;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Create the network directly from protobuf text in a string. Useful for debugging/testing.
-    virtual armnn::INetworkPtr CreateNetworkFromString(
+    armnn::INetworkPtr CreateNetworkFromString(
         const char* protoText,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) = 0;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Retrieve binding info (layer id and tensor info) for the network input identified by the given layer name.
-    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const = 0;
+    BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
 
     /// Retrieve binding info (layer id and tensor info) for the network output identified by the given layer name.
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const = 0;
+    BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
 
-protected:
-    virtual ~ITfParser() {};
+private:
+    template <typename T>
+    friend class ParsedConstTfOperation;
+    friend class ParsedMatMulTfOperation;
+    friend class ParsedMulTfOperation;
+    friend class ParsedTfOperation;
+    friend class SingleLayerParsedTfOperation;
+    friend class DeferredSingleLayerParsedTfOperation;
+    friend class ParsedIdentityTfOperation;
+
+    template <template<typename> class OperatorType, typename T>
+    friend struct MakeTfOperation;
+
+
+    ITfParser();
+    ~ITfParser();
+
+    struct TfParserImpl;
+    std::unique_ptr<TfParserImpl> pTfParserImpl;
 };
 
 }
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index db60224..ac0e40e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -47,12 +47,70 @@
 using armnn::CheckLocation;
 namespace armnnTfLiteParser
 {
+
+ITfLiteParser::ITfLiteParser(const armnn::Optional<TfLiteParserOptions>& options) :
+    pTfLiteParserImpl(new TfLiteParserImpl(options)) {}
+
+ITfLiteParser::~ITfLiteParser() = default;
+
+ITfLiteParser* ITfLiteParser::CreateRaw(const armnn::Optional<TfLiteParserOptions>& options)
+{
+    return new ITfLiteParser(options);
+}
+
+ITfLiteParserPtr ITfLiteParser::Create(const armnn::Optional<TfLiteParserOptions>& options)
+{
+    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
+}
+
+void ITfLiteParser::Destroy(ITfLiteParser* parser)
+{
+    delete parser;
+}
+
+armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
+{
+    return pTfLiteParserImpl->CreateNetworkFromBinaryFile(graphFile);
+}
+
+armnn::INetworkPtr ITfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
+{
+    return pTfLiteParserImpl->CreateNetworkFromBinary(binaryContent);
+}
+
+BindingPointInfo ITfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
+                                                           const std::string& name) const
+{
+    return pTfLiteParserImpl->GetNetworkInputBindingInfo(subgraphId, name);
+}
+
+BindingPointInfo ITfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
+                                                            const std::string& name) const
+{
+    return pTfLiteParserImpl->GetNetworkOutputBindingInfo(subgraphId, name);
+}
+
+size_t ITfLiteParser::GetSubgraphCount() const
+{
+    return pTfLiteParserImpl->GetSubgraphCount();
+}
+
+std::vector<std::string> ITfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
+{
+    return pTfLiteParserImpl->GetSubgraphInputTensorNames(subgraphId);
+}
+
+std::vector<std::string> ITfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
+{
+    return pTfLiteParserImpl->GetSubgraphOutputTensorNames(subgraphId);
+}
+
 namespace
 {
 
 const uint32_t VIRTUAL_OPERATOR_ID = std::numeric_limits<uint32_t>::max();
 
-void CheckSubgraph(const TfLiteParser::ModelPtr & model,
+void CheckSubgraph(const TfLiteParserImpl::ModelPtr & model,
                    size_t subgraphIndex,
                    const CheckLocation & location)
 {
@@ -80,7 +138,7 @@
 #define CHECK_SUBGRAPH(MODEL, SUBGRAPH_INDEX) \
     CheckSubgraph(MODEL, SUBGRAPH_INDEX, CHECK_LOCATION())
 
-void CheckModel(const TfLiteParser::ModelPtr & model,
+void CheckModel(const TfLiteParserImpl::ModelPtr & model,
                 size_t subgraphIndex,
                 size_t operatorIndex,
                 const CheckLocation & location)
@@ -122,7 +180,7 @@
 #define CHECK_MODEL(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX) \
     CheckModel(MODEL, SUBGRAPH_INDEX, OPERATOR_INDEX, CHECK_LOCATION())
 
-void CheckTensor(const TfLiteParser::ModelPtr & model,
+void CheckTensor(const TfLiteParserImpl::ModelPtr & model,
                  size_t subgraphIndex,
                  size_t tensorIndex,
                  const CheckLocation & location)
@@ -151,7 +209,7 @@
 #define CHECK_TENSOR(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX) \
     CheckTensor(MODEL, SUBGRAPH_INDEX, TENSOR_INDEX, CHECK_LOCATION())
 
-void CheckTensorPtr(TfLiteParser::TensorRawPtr rawPtr,
+void CheckTensorPtr(TfLiteParserImpl::TensorRawPtr rawPtr,
                     const CheckLocation & location)
 {
     if (rawPtr == nullptr)
@@ -164,7 +222,7 @@
 #define CHECK_TENSOR_PTR(TENSOR_PTR) \
     CheckTensorPtr(TENSOR_PTR, CHECK_LOCATION())
 
-void CheckBuffer(const TfLiteParser::ModelPtr & model,
+void CheckBuffer(const TfLiteParserImpl::ModelPtr & model,
                  size_t bufferIndex,
                  const CheckLocation & location)
 {
@@ -199,7 +257,7 @@
 #define CHECK_BUFFER(MODEL, BUFFER_INDEX) \
     CheckBuffer(MODEL, BUFFER_INDEX, CHECK_LOCATION())
 
-void CheckBufferSize(TfLiteParser::BufferRawPtr bufferPtr,
+void CheckBufferSize(TfLiteParserImpl::BufferRawPtr bufferPtr,
                      const armnn::TensorInfo & tensorInfo,
                      uint32_t bufferId,
                      const CheckLocation & location)
@@ -296,7 +354,7 @@
     }
 }
 
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
+armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                const std::vector<unsigned int>& shapes,
                                const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3},
                                const bool outputTensor = false)
@@ -429,14 +487,14 @@
     }
 }
 
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
+armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                const armnn::PermutationVector& dimensionMappings = {0, 1, 2, 3})
 {
     auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
     return ToTensorInfo(tensorPtr, dimensions, dimensionMappings);
 }
 
-armnn::TensorInfo ToTensorInfo(TfLiteParser::TensorRawPtr tensorPtr,
+armnn::TensorInfo ToTensorInfo(TfLiteParserImpl::TensorRawPtr tensorPtr,
                                const bool outputTensor)
 {
     auto const & dimensions = AsUnsignedVector(tensorPtr->shape);
@@ -446,8 +504,8 @@
 
 template<typename T>
 std::pair<armnn::ConstTensor, std::unique_ptr<T[]>>
-CreateConstTensorImpl(TfLiteParser::BufferRawPtr bufferPtr,
-                      TfLiteParser::TensorRawPtr tensorPtr,
+CreateConstTensorImpl(TfLiteParserImpl::BufferRawPtr bufferPtr,
+                      TfLiteParserImpl::TensorRawPtr tensorPtr,
                       armnn::TensorInfo& tensorInfo,
                       armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
@@ -536,84 +594,84 @@
 
 } // <anonymous>
 
-TfLiteParser::TfLiteParser(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
+TfLiteParserImpl::TfLiteParserImpl(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
 : m_Options(options)
 , m_Network(nullptr, nullptr)
-, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParser::ParseUnsupportedOperator)
+, m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
 {
     // register supported operators
-    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParser::ParseAdd;
-    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParser::ParseAveragePool2D;
-    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParser::ParseBatchToSpaceND;
-    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParser::ParseConcatenation;
-    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParser::ParseConv2D;
-    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParser::ParseCustomOperator;
-    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParser::ParseDepthToSpace;
-    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParser::ParseDepthwiseConv2D;
-    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParser::ParseDequantize;
-    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParser::ParseElu;
-    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParser::ParseExp;
-    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParser::ParseFullyConnected;
-    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParser::ParseGather;
-    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParser::ParseHardSwish;
-    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParser::ParseLeakyRelu;
-    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParser::ParseLogistic;
-    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParser::ParseL2Normalization;
-    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParser::ParseMaxPool2D;
-    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParser::ParseMaximum;
-    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParser::ParseMean;
-    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParser::ParseMinimum;
-    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParser::ParseMul;
-    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParser::ParseNeg;
-    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParser::ParsePack;
-    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParser::ParsePad;
-    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParser::ParseQuantize;
-    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParser::ParseRelu;
-    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParser::ParseRelu6;
-    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParser::ParseReshape;
-    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParser::ParseResizeBilinear;
-    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParser::ParseResizeNearestNeighbor;
-    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParser::ParseSlice;
-    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParser::ParseSoftmax;
-    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParser::ParseSpaceToBatchND;
-    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParser::ParseSplit;
-    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParser::ParseSplitV;
-    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParser::ParseSqueeze;
-    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParser::ParseStridedSlice;
-    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParser::ParseSub;
-    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParser::ParseSum;
-    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParser::ParseTanH;
-    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParser::ParseTranspose;
-    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParser::ParseTransposeConv;
-    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParser::ParseUnpack;
-    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParser::ParseDiv;
-    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParser::ParseArgMax;
+    m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
+    m_ParserFunctions[tflite::BuiltinOperator_AVERAGE_POOL_2D]         = &TfLiteParserImpl::ParseAveragePool2D;
+    m_ParserFunctions[tflite::BuiltinOperator_BATCH_TO_SPACE_ND]       = &TfLiteParserImpl::ParseBatchToSpaceND;
+    m_ParserFunctions[tflite::BuiltinOperator_CONCATENATION]           = &TfLiteParserImpl::ParseConcatenation;
+    m_ParserFunctions[tflite::BuiltinOperator_CONV_2D]                 = &TfLiteParserImpl::ParseConv2D;
+    m_ParserFunctions[tflite::BuiltinOperator_CUSTOM]                  = &TfLiteParserImpl::ParseCustomOperator;
+    m_ParserFunctions[tflite::BuiltinOperator_DEPTH_TO_SPACE]          = &TfLiteParserImpl::ParseDepthToSpace;
+    m_ParserFunctions[tflite::BuiltinOperator_DEPTHWISE_CONV_2D]       = &TfLiteParserImpl::ParseDepthwiseConv2D;
+    m_ParserFunctions[tflite::BuiltinOperator_DEQUANTIZE]              = &TfLiteParserImpl::ParseDequantize;
+    m_ParserFunctions[tflite::BuiltinOperator_ELU]                     = &TfLiteParserImpl::ParseElu;
+    m_ParserFunctions[tflite::BuiltinOperator_EXP]                     = &TfLiteParserImpl::ParseExp;
+    m_ParserFunctions[tflite::BuiltinOperator_FULLY_CONNECTED]         = &TfLiteParserImpl::ParseFullyConnected;
+    m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
+    m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
+    m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
+    m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
+    m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
+    m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
+    m_ParserFunctions[tflite::BuiltinOperator_MAXIMUM]                 = &TfLiteParserImpl::ParseMaximum;
+    m_ParserFunctions[tflite::BuiltinOperator_MEAN]                    = &TfLiteParserImpl::ParseMean;
+    m_ParserFunctions[tflite::BuiltinOperator_MINIMUM]                 = &TfLiteParserImpl::ParseMinimum;
+    m_ParserFunctions[tflite::BuiltinOperator_MUL]                     = &TfLiteParserImpl::ParseMul;
+    m_ParserFunctions[tflite::BuiltinOperator_NEG]                     = &TfLiteParserImpl::ParseNeg;
+    m_ParserFunctions[tflite::BuiltinOperator_PACK]                    = &TfLiteParserImpl::ParsePack;
+    m_ParserFunctions[tflite::BuiltinOperator_PAD]                     = &TfLiteParserImpl::ParsePad;
+    m_ParserFunctions[tflite::BuiltinOperator_QUANTIZE]                = &TfLiteParserImpl::ParseQuantize;
+    m_ParserFunctions[tflite::BuiltinOperator_RELU]                    = &TfLiteParserImpl::ParseRelu;
+    m_ParserFunctions[tflite::BuiltinOperator_RELU6]                   = &TfLiteParserImpl::ParseRelu6;
+    m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
+    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
+    m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
+    m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
+    m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
+    m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
+    m_ParserFunctions[tflite::BuiltinOperator_SPLIT]                   = &TfLiteParserImpl::ParseSplit;
+    m_ParserFunctions[tflite::BuiltinOperator_SPLIT_V]                 = &TfLiteParserImpl::ParseSplitV;
+    m_ParserFunctions[tflite::BuiltinOperator_SQUEEZE]                 = &TfLiteParserImpl::ParseSqueeze;
+    m_ParserFunctions[tflite::BuiltinOperator_STRIDED_SLICE]           = &TfLiteParserImpl::ParseStridedSlice;
+    m_ParserFunctions[tflite::BuiltinOperator_SUB]                     = &TfLiteParserImpl::ParseSub;
+    m_ParserFunctions[tflite::BuiltinOperator_SUM]                     = &TfLiteParserImpl::ParseSum;
+    m_ParserFunctions[tflite::BuiltinOperator_TANH]                    = &TfLiteParserImpl::ParseTanH;
+    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE]               = &TfLiteParserImpl::ParseTranspose;
+    m_ParserFunctions[tflite::BuiltinOperator_TRANSPOSE_CONV]          = &TfLiteParserImpl::ParseTransposeConv;
+    m_ParserFunctions[tflite::BuiltinOperator_UNPACK]                  = &TfLiteParserImpl::ParseUnpack;
+    m_ParserFunctions[tflite::BuiltinOperator_DIV]                     = &TfLiteParserImpl::ParseDiv;
+    m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
     // register supported custom operators
-    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParser::ParseDetectionPostProcess;
+    m_CustomParserFunctions["TFLite_Detection_PostProcess"]      = &TfLiteParserImpl::ParseDetectionPostProcess;
 }
 
-void TfLiteParser::ResetParser()
+void TfLiteParserImpl::ResetParser()
 {
     m_Network = armnn::INetworkPtr(nullptr, nullptr);
     m_Model = nullptr;
     m_SubgraphConnections.clear();
 }
 
-INetworkPtr TfLiteParser::CreateNetworkFromBinaryFile(const char* graphFile)
+INetworkPtr TfLiteParserImpl::CreateNetworkFromBinaryFile(const char* graphFile)
 {
     ResetParser();
     m_Model = LoadModelFromFile(graphFile);
     return CreateNetworkFromModel();
 }
 
-INetworkPtr TfLiteParser::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
+INetworkPtr TfLiteParserImpl::CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent)
 {
     ResetParser();
     m_Model = LoadModelFromBinary(binaryContent.data(), binaryContent.size());
     return CreateNetworkFromModel();
 }
 
-INetworkPtr TfLiteParser::CreateNetworkFromModel()
+INetworkPtr TfLiteParserImpl::CreateNetworkFromModel()
 {
 
     using NetworkOptions = std::vector<BackendOptions>;
@@ -705,9 +763,9 @@
     return std::move(m_Network);
 }
 
-void TfLiteParser::RegisterProducerOfTensor(size_t subgraphIndex,
-                                            size_t tensorIndex,
-                                            armnn::IOutputSlot* slot)
+void TfLiteParserImpl::RegisterProducerOfTensor(size_t subgraphIndex,
+                                                size_t tensorIndex,
+                                                armnn::IOutputSlot* slot)
 {
     CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
     ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
@@ -728,9 +786,9 @@
     tensorSlots.outputSlot = slot;
 }
 
-void TfLiteParser::RegisterConsumerOfTensor(size_t subgraphIndex,
-                                            size_t tensorIndex,
-                                            armnn::IInputSlot* slot)
+void TfLiteParserImpl::RegisterConsumerOfTensor(size_t subgraphIndex,
+                                                size_t tensorIndex,
+                                                armnn::IInputSlot* slot)
 {
     CHECK_TENSOR(m_Model, subgraphIndex, tensorIndex);
     ARMNN_ASSERT(m_SubgraphConnections.size() > subgraphIndex);
@@ -740,12 +798,12 @@
     tensorSlots.inputSlots.push_back(slot);
 }
 
-void TfLiteParser::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
     // NOTE: By default we presume the custom operator is not supported
-    auto customParserFunction = &TfLiteParser::ParseUnsupportedOperator;
+    auto customParserFunction = &TfLiteParserImpl::ParseUnsupportedOperator;
 
     // Identify custom code defined for custom operator
     const auto& operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
@@ -762,7 +820,7 @@
     (this->*customParserFunction)(subgraphIndex, operatorIndex);
 }
 
-void TfLiteParser::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -811,7 +869,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIds);
 }
 
-void TfLiteParser::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseConv2D(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -894,7 +952,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseDepthwiseConv2D(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -981,7 +1039,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseDequantize(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1006,7 +1064,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParseExp(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1033,7 +1091,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1073,7 +1131,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseTransposeConv(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1186,12 +1244,12 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseAveragePool2D(size_t subgraphIndex, size_t operatorIndex)
 {
     ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Average);
 }
 
-void TfLiteParser::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseBatchToSpaceND(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1242,7 +1300,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseL2Normalization(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1269,12 +1327,12 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseMaxPool2D(size_t subgraphIndex, size_t operatorIndex)
 {
     ParsePool(subgraphIndex, operatorIndex, PoolingAlgorithm::Max);
 }
 
-void TfLiteParser::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseMaximum(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1304,7 +1362,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseMinimum(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1334,9 +1392,9 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParsePool(size_t subgraphIndex,
-                             size_t operatorIndex,
-                             PoolingAlgorithm algorithm)
+void TfLiteParserImpl::ParsePool(size_t subgraphIndex,
+                                 size_t operatorIndex,
+                                 PoolingAlgorithm algorithm)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1406,7 +1464,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSlice(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1451,7 +1509,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSoftmax(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
     const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
@@ -1481,7 +1539,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSpaceToBatchND(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1532,8 +1590,8 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-armnn::TensorInfo TfLiteParser::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
-                                                     const armnn::TensorInfo & inputTensorInfo)
+armnn::TensorInfo TfLiteParserImpl::OutputShapeOfSqueeze(const std::vector<uint32_t> & squeezeDimsIn,
+                                                         const armnn::TensorInfo & inputTensorInfo)
 {
     CHECK_VALID_SIZE(squeezeDimsIn.size(), 0, 1, 2, 3, 4);
     std::vector<uint32_t> squeezeDims = squeezeDimsIn;
@@ -1584,7 +1642,7 @@
     return outTensorInfo;
 }
 
-void TfLiteParser::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSqueeze(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1600,7 +1658,7 @@
 
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
     armnn::TensorInfo outputTensorInfo =
-        TfLiteParser::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
+        TfLiteParserImpl::OutputShapeOfSqueeze(AsUnsignedVector(options->squeeze_dims),
                                            inputTensorInfo);
     CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
 
@@ -1618,7 +1676,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseStridedSlice(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1675,7 +1733,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseSub(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSub(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1707,7 +1765,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseDiv(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1738,7 +1796,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseAdd(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1769,7 +1827,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseMul(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseMul(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1800,7 +1858,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseMean(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseMean(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1837,7 +1895,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
 {
   CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1862,13 +1920,13 @@
   RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParsePad(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
-    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
 
-    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
@@ -1904,7 +1962,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseQuantize(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -1929,42 +1987,42 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseRelu(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::ReLu);
 }
 
-void TfLiteParser::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseRelu6(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex,operatorIndex, ActivationFunction::BoundedReLu);
 }
 
-void TfLiteParser::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseLeakyRelu(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::LeakyReLu);
 }
 
-void TfLiteParser::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseLogistic(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::Sigmoid);
 }
 
-void TfLiteParser::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseTanH(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex,operatorIndex,ActivationFunction::TanH);
 }
 
-void TfLiteParser::ParseElu(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseElu(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::Elu);
 }
 
-void TfLiteParser::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseHardSwish(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseActivation(subgraphIndex, operatorIndex, ActivationFunction::HardSwish);
 }
 
-void TfLiteParser::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
+void TfLiteParserImpl::ParseActivation(size_t subgraphIndex, size_t operatorIndex, ActivationFunction activationType)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
     const auto & operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
@@ -2046,8 +2104,8 @@
     auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
-armnn::TensorInfo TfLiteParser::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
-                                                     const std::vector<int32_t> & targetDimsIn)
+armnn::TensorInfo TfLiteParserImpl::OutputShapeOfReshape(const armnn::TensorInfo & inputTensorInfo,
+                                                         const std::vector<int32_t> & targetDimsIn)
 {
     std::vector<unsigned int> outputDims(targetDimsIn.begin(), targetDimsIn.end());
     const auto stretchDim = std::find(targetDimsIn.begin(), targetDimsIn.end(), -1);
@@ -2076,7 +2134,7 @@
     return reshapeInfo;
 }
 
-void TfLiteParser::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseReshape(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2151,7 +2209,7 @@
     }
 
     armnn::TensorInfo reshapeOutputTensorInfo =
-        TfLiteParser::OutputShapeOfReshape(inputTensorInfo, targetShape);
+        TfLiteParserImpl::OutputShapeOfReshape(inputTensorInfo, targetShape);
 
     // Check for valid input size and that reshape parameters equal output shape
     const armnn::TensorShape& reshapeOutputTensorShape = reshapeOutputTensorInfo.GetShape();
@@ -2181,17 +2239,17 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseResizeBilinear(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseResize(subgraphIndex, operatorIndex, ResizeMethod::Bilinear);
 }
 
-void TfLiteParser::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseResizeNearestNeighbor(size_t subgraphIndex, size_t operatorIndex)
 {
     ParseResize(subgraphIndex, operatorIndex, ResizeMethod::NearestNeighbor);
 }
 
-void TfLiteParser::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
+void TfLiteParserImpl::ParseResize(size_t subgraphIndex, size_t operatorIndex, ResizeMethod resizeMethod)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2257,7 +2315,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseConcatenation(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2307,7 +2365,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseFullyConnected(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2418,7 +2476,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, fusedActivationLayer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseDetectionPostProcess(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2497,7 +2555,7 @@
 }
 
 /// The TfLite Pack operator is equivalent to the ArmNN Stack operator
-void TfLiteParser::ParsePack(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParsePack(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2536,7 +2594,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseUnpack(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2638,7 +2696,7 @@
     }
 }
 
-void TfLiteParser::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSplit(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2730,7 +2788,7 @@
     return static_cast<unsigned int>(v);
 }
 
-void TfLiteParser::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSplitV(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -2866,7 +2924,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseArgMax(size_t subgraphIndex, size_t operatorIndex)
 {
     const auto &operatorPtr = m_Model->subgraphs[subgraphIndex]->operators[operatorIndex];
     const auto *options = operatorPtr->builtin_options.AsArgMaxOptions();
@@ -2907,13 +2965,13 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParser::ParseGather(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseGather(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
-    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(inputs.size(), 2);
-    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[0]);
@@ -2960,13 +3018,13 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseDepthToSpace(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
-    TfLiteParser::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    TfLiteParserImpl::TensorRawPtrVector inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(inputs.size(), 1);
-    TfLiteParser::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    TfLiteParserImpl::TensorRawPtrVector outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), 1);
 
     armnn::DepthToSpaceDescriptor descriptor;
@@ -2996,7 +3054,7 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParser::ParseSum(size_t subgraphIndex, size_t operatorIndex)
+void TfLiteParserImpl::ParseSum(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
 
@@ -3048,9 +3106,9 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-armnn::IConnectableLayer* TfLiteParser::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
-                                                                unsigned int outputSlot,
-                                                                tflite::ActivationFunctionType activationType)
+armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
+                                                                    unsigned int outputSlot,
+                                                                    tflite::ActivationFunctionType activationType)
 {
     ActivationDescriptor activationDesc;
     std::string layerName = prevLayer->GetName();
@@ -3109,7 +3167,7 @@
     return activationLayer;
 }
 
-TfLiteParser::ModelPtr TfLiteParser::LoadModelFromFile(const char * fileName)
+TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromFile(const char * fileName)
 {
     if (fileName == nullptr)
     {
@@ -3133,7 +3191,7 @@
                                fileContent.size());
 }
 
-TfLiteParser::ModelPtr TfLiteParser::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
+TfLiteParserImpl::ModelPtr TfLiteParserImpl::LoadModelFromBinary(const uint8_t * binaryContent, size_t len)
 {
     if (binaryContent == nullptr)
      {
@@ -3152,9 +3210,9 @@
     return tflite::UnPackModel(binaryContent);
 }
 
-TfLiteParser::TensorRawPtrVector TfLiteParser::GetInputs(const ModelPtr & model,
-                                                         size_t subgraphIndex,
-                                                         size_t operatorIndex)
+TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetInputs(const ModelPtr & model,
+                                                                 size_t subgraphIndex,
+                                                                 size_t operatorIndex)
 {
     CHECK_MODEL(model, subgraphIndex, operatorIndex);
 
@@ -3171,9 +3229,9 @@
     return result;
 }
 
-TfLiteParser::TensorRawPtrVector TfLiteParser::GetOutputs(const ModelPtr & model,
-                                                          size_t subgraphIndex,
-                                                          size_t operatorIndex)
+TfLiteParserImpl::TensorRawPtrVector TfLiteParserImpl::GetOutputs(const ModelPtr & model,
+                                                                  size_t subgraphIndex,
+                                                                  size_t operatorIndex)
 {
     CHECK_MODEL(model, subgraphIndex, operatorIndex);
 
@@ -3191,8 +3249,8 @@
     return result;
 }
 
-TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphInputs(const ModelPtr & model,
-                                                                   size_t subgraphIndex)
+TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphInputs(const ModelPtr & model,
+                                                                           size_t subgraphIndex)
 {
     CHECK_SUBGRAPH(model, subgraphIndex);
     const auto & subgraphPtr = model->subgraphs[subgraphIndex];
@@ -3208,8 +3266,8 @@
     return result;
 }
 
-TfLiteParser::TensorIdRawPtrVector TfLiteParser::GetSubgraphOutputs(const ModelPtr & model,
-                                                                    size_t subgraphIndex)
+TfLiteParserImpl::TensorIdRawPtrVector TfLiteParserImpl::GetSubgraphOutputs(const ModelPtr & model,
+                                                                            size_t subgraphIndex)
 {
     CHECK_SUBGRAPH(model, subgraphIndex);
     const auto & subgraphPtr = model->subgraphs[subgraphIndex];
@@ -3224,9 +3282,9 @@
     return result;
 }
 
-std::vector<int32_t>& TfLiteParser::GetInputTensorIds(const ModelPtr& model,
-                                                      size_t subgraphIndex,
-                                                      size_t operatorIndex)
+std::vector<int32_t>& TfLiteParserImpl::GetInputTensorIds(const ModelPtr& model,
+                                                          size_t subgraphIndex,
+                                                          size_t operatorIndex)
 {
     CHECK_MODEL(model, subgraphIndex, operatorIndex);
     const auto & subgraphPtr = model->subgraphs[subgraphIndex];
@@ -3234,9 +3292,9 @@
     return operatorPtr->inputs;
 }
 
-std::vector<int32_t>& TfLiteParser::GetOutputTensorIds(const ModelPtr& model,
-                                                       size_t subgraphIndex,
-                                                       size_t operatorIndex)
+std::vector<int32_t>& TfLiteParserImpl::GetOutputTensorIds(const ModelPtr& model,
+                                                           size_t subgraphIndex,
+                                                           size_t operatorIndex)
 {
     CHECK_MODEL(model, subgraphIndex, operatorIndex);
     const auto & subgraphPtr = model->subgraphs[subgraphIndex];
@@ -3244,10 +3302,10 @@
     return operatorPtr->outputs;
 }
 
-void TfLiteParser::RegisterInputSlots(size_t subgraphIndex,
-                                      size_t operatorIndex,
-                                      IConnectableLayer* layer,
-                                      const std::vector<unsigned int>& tensorIndexes)
+void TfLiteParserImpl::RegisterInputSlots(size_t subgraphIndex,
+                                          size_t operatorIndex,
+                                          IConnectableLayer* layer,
+                                          const std::vector<unsigned int>& tensorIndexes)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
     ARMNN_ASSERT(layer != nullptr);
@@ -3271,10 +3329,10 @@
     }
 }
 
-void TfLiteParser::RegisterOutputSlots(size_t subgraphIndex,
-                                       size_t operatorIndex,
-                                       IConnectableLayer* layer,
-                                       const std::vector<unsigned int>& tensorIndexes)
+void TfLiteParserImpl::RegisterOutputSlots(size_t subgraphIndex,
+                                           size_t operatorIndex,
+                                           IConnectableLayer* layer,
+                                           const std::vector<unsigned int>& tensorIndexes)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
     ARMNN_ASSERT(layer != nullptr);
@@ -3298,7 +3356,7 @@
     }
 }
 
-void TfLiteParser::SetupInputLayers(size_t subgraphIndex)
+void TfLiteParserImpl::SetupInputLayers(size_t subgraphIndex)
 {
     CHECK_SUBGRAPH(m_Model, subgraphIndex);
 
@@ -3319,7 +3377,7 @@
     }
 }
 
-void TfLiteParser::SetupOutputLayers(size_t subgraphIndex)
+void TfLiteParserImpl::SetupOutputLayers(size_t subgraphIndex)
 {
     CHECK_SUBGRAPH(m_Model, subgraphIndex);
 
@@ -3337,7 +3395,7 @@
     }
 }
 
-void TfLiteParser::SetupConstantLayers(size_t subgraphIndex)
+void TfLiteParserImpl::SetupConstantLayers(size_t subgraphIndex)
 {
     CHECK_SUBGRAPH(m_Model, subgraphIndex);
 
@@ -3371,16 +3429,16 @@
 }
 
 // example usage: BufferRawPtr bufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
-TfLiteParser::BufferRawPtr TfLiteParser::GetBuffer(const ModelPtr& model, size_t bufferIndex)
+TfLiteParserImpl::BufferRawPtr TfLiteParserImpl::GetBuffer(const ModelPtr& model, size_t bufferIndex)
 {
     CHECK_BUFFER(model, bufferIndex);
     return model->buffers[bufferIndex].get();
 }
 
 template<typename T>
-std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
-TfLiteParser::CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
-                                            TfLiteParser::TensorRawPtr tensorPtr,
+std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
+TfLiteParserImpl::CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
+                                            TfLiteParserImpl::TensorRawPtr tensorPtr,
                                             armnn::TensorInfo& tensorInfo,
                                             armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
@@ -3388,12 +3446,12 @@
                                               tensorPtr,
                                               tensorInfo,
                                               permutationVector);
-    TfLiteParser::SupportedDataStorage storage(std::move(constData.second));
+    TfLiteParserImpl::SupportedDataStorage storage(std::move(constData.second));
     return std::make_pair(constData.first, std::move(storage));
 }
 
-std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
-TfLiteParser::CreateConstTensor(TensorRawPtr tensorPtr,
+std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
+TfLiteParserImpl::CreateConstTensor(TensorRawPtr tensorPtr,
                                 armnn::TensorInfo& tensorInfo,
                                 armnn::Optional<armnn::PermutationVector&> permutationVector)
 {
@@ -3440,8 +3498,8 @@
     }
 }
 
-BindingPointInfo TfLiteParser::GetNetworkInputBindingInfo(size_t subgraphId,
-                                                          const std::string& name) const
+BindingPointInfo TfLiteParserImpl::GetNetworkInputBindingInfo(size_t subgraphId,
+                                                              const std::string& name) const
 {
     CHECK_SUBGRAPH(m_Model, subgraphId);
     auto inputs = GetSubgraphInputs(m_Model, subgraphId);
@@ -3469,8 +3527,8 @@
                     CHECK_LOCATION().AsString()));
 }
 
-BindingPointInfo TfLiteParser::GetNetworkOutputBindingInfo(size_t subgraphId,
-                                                           const std::string& name) const
+BindingPointInfo TfLiteParserImpl::GetNetworkOutputBindingInfo(size_t subgraphId,
+                                                               const std::string& name) const
 {
     CHECK_SUBGRAPH(m_Model, subgraphId);
     auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
@@ -3501,12 +3559,12 @@
                     CHECK_LOCATION().AsString()));
 }
 
-size_t TfLiteParser::GetSubgraphCount() const
+size_t TfLiteParserImpl::GetSubgraphCount() const
 {
     return m_Model->subgraphs.size();
 }
 
-std::vector<std::string> TfLiteParser::GetSubgraphInputTensorNames(size_t subgraphId) const
+std::vector<std::string> TfLiteParserImpl::GetSubgraphInputTensorNames(size_t subgraphId) const
 {
     CHECK_SUBGRAPH(m_Model, subgraphId);
     auto inputs = GetSubgraphInputs(m_Model, subgraphId);
@@ -3519,7 +3577,7 @@
     return result;
 }
 
-std::vector<std::string> TfLiteParser::GetSubgraphOutputTensorNames(size_t subgraphId) const
+std::vector<std::string> TfLiteParserImpl::GetSubgraphOutputTensorNames(size_t subgraphId) const
 {
     CHECK_SUBGRAPH(m_Model, subgraphId);
     auto outputs = GetSubgraphOutputs(m_Model, subgraphId);
@@ -3532,22 +3590,7 @@
     return result;
 }
 
-ITfLiteParser* ITfLiteParser::CreateRaw(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
-{
-    return new TfLiteParser(options);
-}
-
-ITfLiteParserPtr ITfLiteParser::Create(const Optional<ITfLiteParser::TfLiteParserOptions>& options)
-{
-    return ITfLiteParserPtr(CreateRaw(options), &ITfLiteParser::Destroy);
-}
-
-void ITfLiteParser::Destroy(ITfLiteParser* parser)
-{
-    delete parser;
-}
-
-TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
+TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<float[]> && data)
 : m_FloatData(std::move(data))
 , m_Uint8Data(nullptr)
 , m_Int8Data(nullptr)
@@ -3555,7 +3598,7 @@
 {
 }
 
-TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
+TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<uint8_t[]> && data)
 : m_FloatData(nullptr)
 , m_Uint8Data(std::move(data))
 , m_Int8Data(nullptr)
@@ -3563,7 +3606,7 @@
 {
 }
 
-TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
+TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int8_t[]> && data)
 : m_FloatData(nullptr)
 , m_Uint8Data(nullptr)
 , m_Int8Data(std::move(data))
@@ -3571,7 +3614,7 @@
 {
 }
 
-TfLiteParser::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
+TfLiteParserImpl::SupportedDataStorage::SupportedDataStorage(std::unique_ptr<int32_t[]> && data)
 : m_FloatData(nullptr)
 , m_Uint8Data(nullptr)
 , m_Int8Data(nullptr)
diff --git a/src/armnnTfLiteParser/TfLiteParser.hpp b/src/armnnTfLiteParser/TfLiteParser.hpp
index 5f18060..12a085d 100644
--- a/src/armnnTfLiteParser/TfLiteParser.hpp
+++ b/src/armnnTfLiteParser/TfLiteParser.hpp
@@ -16,7 +16,7 @@
 namespace armnnTfLiteParser
 {
 
-class TfLiteParser : public ITfLiteParser
+class TfLiteParserImpl
 {
 public:
     // Shorthands for TfLite types
@@ -34,33 +34,33 @@
 
 public:
     /// Create the network from a flatbuffers binary file on disk
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile) override;
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(const char* graphFile);
 
     /// Create the network from a flatbuffers binary
-    virtual armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent) override;
+    armnn::INetworkPtr CreateNetworkFromBinary(const std::vector<uint8_t> & binaryContent);
 
 
     /// Retrieve binding info (layer id and tensor info) for the network input identified by
     /// the given layer name and subgraph id
-    virtual BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId,
-                                                        const std::string& name) const override;
+    BindingPointInfo GetNetworkInputBindingInfo(size_t subgraphId,
+                                                const std::string& name) const;
 
     /// Retrieve binding info (layer id and tensor info) for the network output identified by
     /// the given layer name and subgraph id
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId,
-                                                         const std::string& name) const override;
+    BindingPointInfo GetNetworkOutputBindingInfo(size_t subgraphId,
+                                                 const std::string& name) const;
 
     /// Return the number of subgraphs in the parsed model
-    virtual size_t GetSubgraphCount() const override;
+    size_t GetSubgraphCount() const;
 
     /// Return the input tensor names for a given subgraph
-    virtual std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId) const override;
+    std::vector<std::string> GetSubgraphInputTensorNames(size_t subgraphId) const;
 
     /// Return the output tensor names for a given subgraph
-    virtual std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const override;
+    std::vector<std::string> GetSubgraphOutputTensorNames(size_t subgraphId) const;
 
-    TfLiteParser(const armnn::Optional<ITfLiteParser::TfLiteParserOptions>& options = armnn::EmptyOptional());
-    virtual ~TfLiteParser() {}
+    TfLiteParserImpl(const armnn::Optional<ITfLiteParser::TfLiteParserOptions>& options = armnn::EmptyOptional());
+    ~TfLiteParserImpl() = default;
 
 public:
     // testable helpers
@@ -81,14 +81,14 @@
 
 private:
     // No copying allowed until it is wanted and properly implemented
-    TfLiteParser(const TfLiteParser &) = delete;
-    TfLiteParser & operator=(const TfLiteParser &) = delete;
+    TfLiteParserImpl(const TfLiteParserImpl &) = delete;
+    TfLiteParserImpl & operator=(const TfLiteParserImpl &) = delete;
 
     /// Create the network from an already loaded flatbuffers model
     armnn::INetworkPtr CreateNetworkFromModel();
 
     // signature for the parser functions
-    using OperatorParsingFunction = void(TfLiteParser::*)(size_t subgraphIndex, size_t operatorIndex);
+    using OperatorParsingFunction = void(TfLiteParserImpl::*)(size_t subgraphIndex, size_t operatorIndex);
 
     void ParseCustomOperator(size_t subgraphIndex, size_t operatorIndex);
     void ParseUnsupportedOperator(size_t subgraphIndex, size_t operatorIndex);
@@ -190,9 +190,9 @@
 
 
     template<typename T>
-    std::pair<armnn::ConstTensor, TfLiteParser::SupportedDataStorage>
-    CreateConstTensorAndStoreData(TfLiteParser::BufferRawPtr bufferPtr,
-                                  TfLiteParser::TensorRawPtr tensorPtr,
+    std::pair<armnn::ConstTensor, TfLiteParserImpl::SupportedDataStorage>
+    CreateConstTensorAndStoreData(TfLiteParserImpl::BufferRawPtr bufferPtr,
+                                  TfLiteParserImpl::TensorRawPtr tensorPtr,
                                   armnn::TensorInfo& tensorInfo,
                                   armnn::Optional<armnn::PermutationVector&> permutationVector);
 
diff --git a/src/armnnTfLiteParser/test/Constant.cpp b/src/armnnTfLiteParser/test/Constant.cpp
index cc89223..bfb76a9 100644
--- a/src/armnnTfLiteParser/test/Constant.cpp
+++ b/src/armnnTfLiteParser/test/Constant.cpp
@@ -10,7 +10,7 @@
 #include <string>
 #include <iostream>
 
-using armnnTfLiteParser::TfLiteParser;
+using armnnTfLiteParser::TfLiteParserImpl;
 
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
diff --git a/src/armnnTfLiteParser/test/GetBuffer.cpp b/src/armnnTfLiteParser/test/GetBuffer.cpp
index cccdbce..0e72522 100644
--- a/src/armnnTfLiteParser/test/GetBuffer.cpp
+++ b/src/armnnTfLiteParser/test/GetBuffer.cpp
@@ -8,7 +8,7 @@
 #include "../TfLiteParser.hpp"
 #include <sstream>
 
-using armnnTfLiteParser::TfLiteParser;
+using armnnTfLiteParser::TfLiteParserImpl;
 
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
@@ -88,12 +88,12 @@
         ReadStringToBinary();
     }
 
-    void CheckBufferContents(const TfLiteParser::ModelPtr& model,
+    void CheckBufferContents(const TfLiteParserImpl::ModelPtr& model,
                              std::vector<int32_t> bufferValues, size_t bufferIndex)
     {
         for(long unsigned int i=0; i<bufferValues.size(); i++)
         {
-            BOOST_CHECK_EQUAL(TfLiteParser::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
+            BOOST_CHECK_EQUAL(TfLiteParserImpl::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
         }
     }
 };
@@ -101,7 +101,8 @@
 BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
 {
     //Check contents of buffer are correct
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
     std::vector<int32_t> bufferValues = {2,1,0,6,2,1,4,1,2};
     CheckBufferContents(model, bufferValues, 2);
 }
@@ -109,18 +110,20 @@
 BOOST_FIXTURE_TEST_CASE(GetBufferCheckEmpty, GetBufferFixture)
 {
     //Check if test fixture buffers are empty or not
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK(TfLiteParser::GetBuffer(model, 0)->data.empty());
-    BOOST_CHECK(TfLiteParser::GetBuffer(model, 1)->data.empty());
-    BOOST_CHECK(!TfLiteParser::GetBuffer(model, 2)->data.empty());
-    BOOST_CHECK(TfLiteParser::GetBuffer(model, 3)->data.empty());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 0)->data.empty());
+    BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 1)->data.empty());
+    BOOST_CHECK(!TfLiteParserImpl::GetBuffer(model, 2)->data.empty());
+    BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 3)->data.empty());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetBufferCheckParseException, GetBufferFixture)
 {
     //Check if armnn::ParseException thrown when invalid buffer index used
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetBuffer(model, 4), armnn::Exception);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetBuffer(model, 4), armnn::Exception);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
index 8247978..894de0c 100644
--- a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
@@ -6,8 +6,8 @@
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
 
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
@@ -152,22 +152,25 @@
 
 BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
     BOOST_CHECK_EQUAL(0, tensors.size());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
     BOOST_CHECK_EQUAL(0, tensors.size());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 0, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
     BOOST_CHECK_EQUAL(1, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                       "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
@@ -175,8 +178,9 @@
 
 BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 0, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
     BOOST_CHECK_EQUAL(1, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                       "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
@@ -184,8 +188,9 @@
 
 BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetInputs(model, 1, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 1, 0);
     BOOST_CHECK_EQUAL(2, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                       "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
@@ -195,8 +200,9 @@
 
 BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorRawPtrVector tensors = TfLiteParser::GetOutputs(model, 1, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 1, 0);
     BOOST_CHECK_EQUAL(1, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                       "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
@@ -204,36 +210,40 @@
 
 BOOST_AUTO_TEST_CASE(GetInputsNullModel)
 {
-    BOOST_CHECK_THROW(TfLiteParser::GetInputs(nullptr, 0, 0), armnn::ParseException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
 }
 
 BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
 {
-    BOOST_CHECK_THROW(TfLiteParser::GetOutputs(nullptr, 0, 0), armnn::ParseException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 2, 0), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 2, 0), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetInputs(model, 0, 1), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetOutputs(model, 0, 1), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
index e0fbd35..100e8e9 100644
--- a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
@@ -6,9 +6,9 @@
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
-using TensorRawPtr = TfLiteParser::TensorRawPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
+using TensorRawPtr = TfLiteParserImpl::TensorRawPtr;
 
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
@@ -153,22 +153,25 @@
 
 BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphInputs, GetEmptySubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
     BOOST_CHECK_EQUAL(0, subgraphTensors.size());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphOutputs, GetEmptySubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
     BOOST_CHECK_EQUAL(0, subgraphTensors.size());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
     BOOST_CHECK_EQUAL(1, subgraphTensors.size());
     BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
@@ -177,8 +180,9 @@
 
 BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 0);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
     BOOST_CHECK_EQUAL(1, subgraphTensors.size());
     BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
@@ -187,8 +191,9 @@
 
 BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphInputs(model, 1);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 1);
     BOOST_CHECK_EQUAL(1, subgraphTensors.size());
     BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
@@ -197,8 +202,9 @@
 
 BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    TfLiteParser::TensorIdRawPtrVector subgraphTensors = TfLiteParser::GetSubgraphOutputs(model, 1);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 1);
     BOOST_CHECK_EQUAL(1, subgraphTensors.size());
     BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
@@ -207,24 +213,26 @@
 
 BOOST_AUTO_TEST_CASE(GetSubgraphInputsNullModel)
 {
-    BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
 }
 
 BOOST_AUTO_TEST_CASE(GetSubgraphOutputsNullModel)
 {
-    BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetSubgraphInputs(model, 2), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(model, 2), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetSubgraphOutputs(model, 2), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(model, 2), armnn::ParseException);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetTensorIds.cpp b/src/armnnTfLiteParser/test/GetTensorIds.cpp
index 6b82bb1..f45f6e6 100644
--- a/src/armnnTfLiteParser/test/GetTensorIds.cpp
+++ b/src/armnnTfLiteParser/test/GetTensorIds.cpp
@@ -6,8 +6,8 @@
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
 
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
@@ -91,72 +91,80 @@
 
 BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
     std::vector<int32_t> expectedIds = { };
-    std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+    std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
     BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
                                   inputTensorIds.begin(), inputTensorIds.end());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
     std::vector<int32_t> expectedIds = { };
-    std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+    std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
     BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
                                   outputTensorIds.begin(), outputTensorIds.end());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
     std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
-    std::vector<int32_t> inputTensorIds = TfLiteParser::GetInputTensorIds(model, 0, 0);
+    std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
     BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
                                   inputTensorIds.begin(), inputTensorIds.end());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
     std::vector<int32_t> expectedOutputIds = { 3 };
-    std::vector<int32_t> outputTensorIds = TfLiteParser::GetOutputTensorIds(model, 0, 0);
+    std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
     BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
                                   outputTensorIds.begin(), outputTensorIds.end());
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
 {
-    BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
 {
-    BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 1, 0), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetInputTensorIds(model, 0, 1), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
 }
 
 BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParser::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
+    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/LoadModel.cpp b/src/armnnTfLiteParser/test/LoadModel.cpp
index 9777333..1afb5f1 100644
--- a/src/armnnTfLiteParser/test/LoadModel.cpp
+++ b/src/armnnTfLiteParser/test/LoadModel.cpp
@@ -8,10 +8,10 @@
 
 #include <Filesystem.hpp>
 
-using armnnTfLiteParser::TfLiteParser;
-using ModelPtr = TfLiteParser::ModelPtr;
-using SubgraphPtr = TfLiteParser::SubgraphPtr;
-using OperatorPtr = TfLiteParser::OperatorPtr;
+using armnnTfLiteParser::TfLiteParserImpl;
+using ModelPtr = TfLiteParserImpl::ModelPtr;
+using SubgraphPtr = TfLiteParserImpl::SubgraphPtr;
+using OperatorPtr = TfLiteParserImpl::OperatorPtr;
 
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
@@ -185,7 +185,8 @@
 
 BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
 {
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromBinary(m_GraphBinary.data(), m_GraphBinary.size());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
+                                                                             m_GraphBinary.size());
     CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
                2, "Test loading a model", 2);
     CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
@@ -205,7 +206,7 @@
                                        m_GraphBinary.size(), true);
     BOOST_CHECK_MESSAGE(saved, "Cannot save test file");
 
-    TfLiteParser::ModelPtr model = TfLiteParser::LoadModelFromFile(fname.c_str());
+    TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile(fname.c_str());
     CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
                2, "Test loading a model", 2);
     CheckSubgraph(model->subgraphs[0], 2, { 1 }, { 0 }, 1, "");
@@ -219,24 +220,24 @@
 
 BOOST_AUTO_TEST_CASE(LoadNullBinary)
 {
-    BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
 }
 
 BOOST_AUTO_TEST_CASE(LoadInvalidBinary)
 {
     std::string testData = "invalid data";
-    BOOST_CHECK_THROW(TfLiteParser::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
+    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
                                                         testData.length()), armnn::ParseException);
 }
 
 BOOST_AUTO_TEST_CASE(LoadFileNotFound)
 {
-    BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
 }
 
 BOOST_AUTO_TEST_CASE(LoadNullPtrFile)
 {
-    BOOST_CHECK_THROW(TfLiteParser::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
+    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
index 26cd92a..e616158 100644
--- a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
+++ b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
@@ -11,12 +11,10 @@
 struct TfLiteParserFixture
 {
 
-    armnnTfLiteParser::TfLiteParser m_Parser;
+    armnnTfLiteParser::TfLiteParserImpl m_Parser;
     unsigned int m_InputShape[4];
 
-    TfLiteParserFixture() : m_Parser( ), m_InputShape { 1, 2, 2, 1 } {
-        m_Parser.Create();
-    }
+    TfLiteParserFixture() : m_Parser( ), m_InputShape { 1, 2, 2, 1 } {}
     ~TfLiteParserFixture()          {  }
 
 };
diff --git a/src/armnnTfLiteParser/test/ResizeBilinear.cpp b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
index 400dc78..8af5612 100644
--- a/src/armnnTfLiteParser/test/ResizeBilinear.cpp
+++ b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
@@ -10,8 +10,6 @@
 #include <string>
 #include <iostream>
 
-using armnnTfLiteParser::TfLiteParser;
-
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
 struct ResizeBilinearFixture : public ParserFlatbuffersFixture
diff --git a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
index fada810..7add5f2 100644
--- a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
+++ b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
@@ -10,8 +10,6 @@
 #include <string>
 #include <iostream>
 
-using armnnTfLiteParser::TfLiteParser;
-
 BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
 
 struct ResizeNearestNeighborFixture : public ParserFlatbuffersFixture
diff --git a/src/armnnTfParser/TfParser.cpp b/src/armnnTfParser/TfParser.cpp
index aec8df8..f926013 100755
--- a/src/armnnTfParser/TfParser.cpp
+++ b/src/armnnTfParser/TfParser.cpp
@@ -32,6 +32,56 @@
 
 namespace armnnTfParser
 {
+
+ITfParser::ITfParser() : pTfParserImpl(new ITfParser::TfParserImpl()){}
+
+ITfParser::~ITfParser() = default;
+
+ITfParser *ITfParser::CreateRaw()
+{
+    return new ITfParser();
+}
+
+ITfParserPtr ITfParser::Create()
+{
+    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
+}
+
+void ITfParser::Destroy(ITfParser *parser)
+{
+    delete parser;
+}
+
+armnn::INetworkPtr ITfParser::CreateNetworkFromTextFile(const char* graphFile,
+                                                        const std::map<std::string, armnn::TensorShape>& inputShapes,
+                                                        const std::vector<std::string>& requestedOutputs)
+{
+    return pTfParserImpl->CreateNetworkFromTextFile(graphFile, inputShapes, requestedOutputs);
+}
+
+armnn::INetworkPtr ITfParser::CreateNetworkFromBinaryFile(const char* graphFile,
+                                                          const std::map<std::string, armnn::TensorShape>& inputShapes,
+                                                          const std::vector<std::string>& requestedOutputs)
+{
+    return pTfParserImpl->CreateNetworkFromBinaryFile(graphFile, inputShapes, requestedOutputs);
+}
+
+armnn::INetworkPtr ITfParser::CreateNetworkFromString(const char* protoText,
+                                                      const std::map<std::string, armnn::TensorShape>& inputShapes,
+                                                      const std::vector<std::string>& requestedOutputs)
+{
+    return pTfParserImpl->CreateNetworkFromString(protoText, inputShapes, requestedOutputs);
+}
+
+BindingPointInfo ITfParser::GetNetworkInputBindingInfo(const std::string& name) const
+{
+    return pTfParserImpl->GetNetworkInputBindingInfo(name);
+}
+
+BindingPointInfo ITfParser::GetNetworkOutputBindingInfo(const std::string& name) const
+{
+    return pTfParserImpl->GetNetworkOutputBindingInfo(name);
+}
 namespace
 {
 
@@ -324,69 +374,55 @@
 
 } // namespace
 
-const std::map<std::string, TfParser::OperationParsingFunction> TfParser::ms_OperationNameToParsingFunctions = {
-    { "Const",                 &TfParser::ParseConst },
-    { "Add",                   &TfParser::ParseAdd },
-    { "AddN",                  &TfParser::ParseAddN },
-    { "BiasAdd",               &TfParser::ParseBiasAdd },
-    { "Identity",              &TfParser::ParseIdentity },
-    { "Conv2D",                &TfParser::ParseConv2D },
-    { "DepthwiseConv2dNative", &TfParser::ParseDepthwiseConv2D },
-    { "ExpandDims",            &TfParser::ParseExpandDims },
-    { "FusedBatchNorm",        &TfParser::ParseFusedBatchNorm },
-    { "Gather",                &TfParser::ParseGather},
-    { "Greater",               &TfParser::ParseGreater},
-    { "ConcatV2",              &TfParser::ParseConcat },
-    { "LRN",                   &TfParser::ParseLrn },
-    { "MatMul",                &TfParser::ParseMatMul },
-    { "Mean",                  &TfParser::ParseMean },
-    { "Mul",                   &TfParser::ParseMul },
-    { "Placeholder",           &TfParser::ParsePlaceholder },
-    { "RealDiv",               &TfParser::ParseRealDiv },
-    { "Relu",                  &TfParser::ParseRelu },
-    { "Relu6",                 &TfParser::ParseRelu6 },
-    { "Reshape",               &TfParser::ParseReshape },
-    { "ResizeBilinear",        &TfParser::ParseResizeBilinear },
-    { "Rsqrt",                 &TfParser::ParseRsqrt },
-    { "Shape",                 &TfParser::ParseShape },
-    { "Squeeze",               &TfParser::ParseSqueeze },
-    { "Sigmoid",               &TfParser::ParseSigmoid },
-    { "Softmax",               &TfParser::ParseSoftmax },
-    { "Softplus",              &TfParser::ParseSoftplus },
-    { "Split",                 &TfParser::ParseSplit },
-    { "StridedSlice",          &TfParser::ParseStridedSlice },
-    { "Tanh",                  &TfParser::ParseTanh },
-    { "MaxPool",               &TfParser::ParseMaxPool },
-    { "AvgPool",               &TfParser::ParseAvgPool },
-    { "Maximum",               &TfParser::ParseMaximum },
-    { "Minimum",               &TfParser::ParseMinimum },
-    { "Equal",                 &TfParser::ParseEqual },
-    { "Pad",                   &TfParser::ParsePad },
-    { "Sub",                   &TfParser::ParseSub },
-    { "Pack" ,                 &TfParser::ParseStack },
-    { "Stack",                 &TfParser::ParseStack },
-    { "Transpose",             &TfParser::ParseTranspose },
+const std::map<std::string, ITfParser::TfParserImpl::OperationParsingFunction>
+    ITfParser::TfParserImpl::ms_OperationNameToParsingFunctions = {
+    { "Const",                 &TfParserImpl::ParseConst },
+    { "Add",                   &TfParserImpl::ParseAdd },
+    { "AddN",                  &TfParserImpl::ParseAddN },
+    { "BiasAdd",               &TfParserImpl::ParseBiasAdd },
+    { "Identity",              &TfParserImpl::ParseIdentity },
+    { "Conv2D",                &TfParserImpl::ParseConv2D },
+    { "DepthwiseConv2dNative", &TfParserImpl::ParseDepthwiseConv2D },
+    { "ExpandDims",            &TfParserImpl::ParseExpandDims },
+    { "FusedBatchNorm",        &TfParserImpl::ParseFusedBatchNorm },
+    { "Gather",                &TfParserImpl::ParseGather},
+    { "Greater",               &TfParserImpl::ParseGreater},
+    { "ConcatV2",              &TfParserImpl::ParseConcat },
+    { "LRN",                   &TfParserImpl::ParseLrn },
+    { "MatMul",                &TfParserImpl::ParseMatMul },
+    { "Mean",                  &TfParserImpl::ParseMean },
+    { "Mul",                   &TfParserImpl::ParseMul },
+    { "Placeholder",           &TfParserImpl::ParsePlaceholder },
+    { "RealDiv",               &TfParserImpl::ParseRealDiv },
+    { "Relu",                  &TfParserImpl::ParseRelu },
+    { "Relu6",                 &TfParserImpl::ParseRelu6 },
+    { "Reshape",               &TfParserImpl::ParseReshape },
+    { "ResizeBilinear",        &TfParserImpl::ParseResizeBilinear },
+    { "Rsqrt",                 &TfParserImpl::ParseRsqrt },
+    { "Shape",                 &TfParserImpl::ParseShape },
+    { "Squeeze",               &TfParserImpl::ParseSqueeze },
+    { "Sigmoid",               &TfParserImpl::ParseSigmoid },
+    { "Softmax",               &TfParserImpl::ParseSoftmax },
+    { "Softplus",              &TfParserImpl::ParseSoftplus },
+    { "Split",                 &TfParserImpl::ParseSplit },
+    { "StridedSlice",          &TfParserImpl::ParseStridedSlice },
+    { "Tanh",                  &TfParserImpl::ParseTanh },
+    { "MaxPool",               &TfParserImpl::ParseMaxPool },
+    { "AvgPool",               &TfParserImpl::ParseAvgPool },
+    { "Maximum",               &TfParserImpl::ParseMaximum },
+    { "Minimum",               &TfParserImpl::ParseMinimum },
+    { "Equal",                 &TfParserImpl::ParseEqual },
+    { "Pad",                   &TfParserImpl::ParsePad },
+    { "Sub",                   &TfParserImpl::ParseSub },
+    { "Pack" ,                 &TfParserImpl::ParseStack },
+    { "Stack",                 &TfParserImpl::ParseStack },
+    { "Transpose",             &TfParserImpl::ParseTranspose },
 };
 
-const std::list<std::string> TfParser::m_ControlInputs = {
+const std::list<std::string> ITfParser::TfParserImpl::m_ControlInputs = {
     "Assert"
 };
 
-ITfParser* ITfParser::CreateRaw()
-{
-    return new TfParser();
-}
-
-ITfParserPtr ITfParser::Create()
-{
-    return ITfParserPtr(CreateRaw(), &ITfParser::Destroy);
-}
-
-void ITfParser::Destroy(ITfParser* parser)
-{
-    delete parser;
-}
-
 inline void CalculateSamePadding(uint32_t inputSize, uint32_t stride,
                                  uint32_t filterSize, bool samePadding,
                                  uint32_t* paddingFront, uint32_t* paddingBack) {
@@ -415,7 +451,7 @@
 class ParsedTfOperation
 {
 public:
-    ParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
+    ParsedTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
     : m_Parser(parser)
     , m_Node(node)
     {
@@ -436,7 +472,7 @@
     }
 
 protected:
-    TfParser* m_Parser;
+    ITfParser::TfParserImpl* m_Parser;
     const tensorflow::NodeDef& m_Node;
 };
 
@@ -445,7 +481,9 @@
 class SingleLayerParsedTfOperation : public ParsedTfOperation
 {
 public:
-    SingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node, IConnectableLayer* layer)
+    SingleLayerParsedTfOperation(ITfParser::TfParserImpl* parser,
+                                 const tensorflow::NodeDef& node,
+                                 IConnectableLayer* layer)
     : ParsedTfOperation(parser, node)
     , m_Layer(layer)
     {
@@ -476,7 +514,7 @@
 class DeferredSingleLayerParsedTfOperation : public SingleLayerParsedTfOperation
 {
 public:
-    DeferredSingleLayerParsedTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
+    DeferredSingleLayerParsedTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
     : SingleLayerParsedTfOperation(parser, node, nullptr)
     {
     }
@@ -495,13 +533,13 @@
 };
 
 
-TfParser::TfParser()
+ITfParser::TfParserImpl::TfParserImpl()
     : m_Network(nullptr, nullptr)
 {
 }
 
 
-const tensorflow::NodeDef* TfParser::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
+const tensorflow::NodeDef* ITfParser::TfParserImpl::ResolveIdentityNode(const tensorflow::NodeDef* nodeDef)
 {
     if (nodeDef->op() != "Identity")
     {
@@ -533,7 +571,7 @@
 }
 
 std::vector<OutputOfConstNodeDef>
-TfParser::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
+ITfParser::TfParserImpl::GetTfInputNodes(const tensorflow::NodeDef& nodeDef) const
 {
     std::vector<OutputOfConstNodeDef> ret;
 
@@ -570,7 +608,7 @@
 }
 
 std::vector<OutputOfParsedTfOperation>
-TfParser::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
+ITfParser::TfParserImpl::GetInputParsedTfOperationsChecked(const tensorflow::NodeDef& nodeDef,
                                             std::size_t expectedNumInputs)
 {
     // Fetches the tensorflow nodes connected as inputs and validate the size.
@@ -605,7 +643,7 @@
     return result;
 }
 
-IConnectableLayer* TfParser::CreateAdditionLayer(
+IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
             const tensorflow::NodeDef& nodeDef,
             IOutputSlot* input0Slot,
             IOutputSlot* input1Slot,
@@ -660,7 +698,7 @@
     return layer;
 }
 
-IConnectableLayer* TfParser::CreateAdditionLayer(
+IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
             const tensorflow::NodeDef& nodeDef,
             IConnectableLayer* layerOne,
             IConnectableLayer* layerTwo,
@@ -679,7 +717,7 @@
     return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
 }
 
-IConnectableLayer* TfParser::CreateAdditionLayer(
+IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
         const tensorflow::NodeDef& nodeDef,
         const OutputOfParsedTfOperation& opOne,
         const OutputOfParsedTfOperation& opTwo,
@@ -692,7 +730,7 @@
     return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, layerName);
 }
 
-IConnectableLayer* TfParser::CreateAdditionLayer(
+IConnectableLayer* ITfParser::TfParserImpl::CreateAdditionLayer(
             const tensorflow::NodeDef& nodeDef,
             const OutputOfParsedTfOperation& op,
             IConnectableLayer* layer)
@@ -702,7 +740,8 @@
     return CreateAdditionLayer(nodeDef, input0Slot, input1Slot, nodeDef.name());
 }
 
-ParsedTfOperationPtr TfParser::ParseAddN(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAddN(const tensorflow::NodeDef& nodeDef,
+                                                        const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     uint32_t numberOfInputs = ReadMandatoryNodeUint32Attribute(nodeDef, "N");
@@ -780,7 +819,8 @@
     }
 }
 
-ParsedTfOperationPtr TfParser::ParseAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAdd(const tensorflow::NodeDef& nodeDef,
+                                                       const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -810,7 +850,8 @@
     }
 }
 
-ParsedTfOperationPtr TfParser::ParseBiasAdd(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseBiasAdd(const tensorflow::NodeDef& nodeDef,
+                                                           const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     return AddAdditionLayer(nodeDef, true);
@@ -820,7 +861,9 @@
 class ParsedIdentityTfOperation : public ParsedTfOperation
 {
 public:
-    ParsedIdentityTfOperation(TfParser* parser, const tensorflow::NodeDef& node, ParsedTfOperation* representative)
+    ParsedIdentityTfOperation(ITfParser::TfParserImpl* parser,
+                              const tensorflow::NodeDef& node,
+                              ParsedTfOperation* representative)
         : ParsedTfOperation(parser, node)
         , m_Representative(representative)
     {
@@ -841,7 +884,8 @@
     ParsedTfOperation* m_Representative;
 };
 
-ParsedTfOperationPtr TfParser::ParseIdentity(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseIdentity(const tensorflow::NodeDef& nodeDef,
+                                                            const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -856,7 +900,7 @@
 class ParsedConstTfOperation : public DeferredSingleLayerParsedTfOperation
 {
 public:
-    ParsedConstTfOperation(TfParser* parser, const tensorflow::NodeDef& node,
+    ParsedConstTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node,
         const T* tensorData, const TensorInfo& tensorInfo)
         : DeferredSingleLayerParsedTfOperation(parser, node),
         m_Storage(tensorData, tensorData + tensorInfo.GetNumElements()),
@@ -868,7 +912,8 @@
     void CreateLayerDeferred() override
     {
         ARMNN_ASSERT(m_Layer == nullptr);
-        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage), m_Node.name().c_str());
+        m_Layer = m_Parser->m_Network->AddConstantLayer(ConstTensor(m_TensorInfo, m_Storage),
+                                                        m_Node.name().c_str());
         m_Layer->GetOutputSlot(0).SetTensorInfo(m_TensorInfo);
     }
 
@@ -982,8 +1027,9 @@
 struct MakeTfOperation
 {
     template<typename DataType, class... Args>
-    inline static std::unique_ptr<OperatorType<DataType>> Parse(TfParser* parser, const tensorflow::NodeDef& node,
-        Args&&... args)
+    inline static std::unique_ptr<OperatorType<DataType>> Parse(ITfParser::TfParserImpl* parser,
+                                                                const tensorflow::NodeDef& node,
+                                                                Args&&... args)
     {
         return std::make_unique<OperatorType<DataType>>(parser, node, std::forward<Args>(args)...);
     }
@@ -993,7 +1039,7 @@
 struct MakeTfOperation<ParsedConstTfOperation>
 {
     template<typename DataType, class... Args>
-    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(TfParser* parser,
+    inline static std::unique_ptr<ParsedConstTfOperation<DataType>> Parse(ITfParser::TfParserImpl* parser,
         const tensorflow::NodeDef& node, const std::vector<int8_t>& tensorData, const TensorInfo& tensorInfo)
     {
         return std::make_unique<ParsedConstTfOperation<DataType>>(parser, node,
@@ -1033,7 +1079,8 @@
     }
 };
 
-ParsedTfOperationPtr TfParser::ParseConst(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConst(const tensorflow::NodeDef& nodeDef,
+                                                         const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     ARMNN_ASSERT(nodeDef.op() == "Const");
@@ -1128,7 +1175,7 @@
 }
 
 template<typename Type>
-bool TfParser::HasParsedConstTensor(const std::string & nodeName) const
+bool ITfParser::TfParserImpl::HasParsedConstTensor(const std::string & nodeName) const
 {
     auto it = m_ParsedTfOperations.find(nodeName);
     if (it == m_ParsedTfOperations.end())
@@ -1139,12 +1186,12 @@
 }
 
 template<typename Type>
-bool TfParser::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
+bool ITfParser::TfParserImpl::HasParsedConstTensor(ParsedTfOperation* parsedTfOpPtr) const
 {
     return dynamic_cast<ParsedConstTfOperation<Type>*>(parsedTfOpPtr) != nullptr;
 }
 
-unsigned int TfParser::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
+unsigned int ITfParser::TfParserImpl::GetConstInputIndex(const std::vector<OutputOfParsedTfOperation>& inputs)
 {
     for (unsigned int i = 0; i < inputs.size(); i++)
     {
@@ -1159,7 +1206,7 @@
 
 }
 
-ParsedTfOperationPtr TfParser::ParseConv2D(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConv2D(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -1297,8 +1344,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
-                                                    const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseDepthwiseConv2D(const tensorflow::NodeDef& nodeDef,
+                                                                   const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -1486,7 +1533,8 @@
     return outTensorInfo;
 }
 
-ParsedTfOperationPtr TfParser::ParseExpandDims(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseExpandDims(const tensorflow::NodeDef& nodeDef,
+                                                              const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
 
@@ -1568,8 +1616,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
-                                                   const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseFusedBatchNorm(const tensorflow::NodeDef& nodeDef,
+                                                                  const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 5);
@@ -1659,11 +1707,11 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-bool TfParser::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
-                                           size_t alphaLayerIndex,
-                                           const OutputOfParsedTfOperation& otherOp,
-                                           armnn::IOutputSlot** outputOfLeakyRelu,
-                                           armnn::ActivationDescriptor & desc)
+bool ITfParser::TfParserImpl::IsSupportedLeakyReluPattern(const tensorflow::NodeDef& mulNodeDef,
+                                                          size_t alphaLayerIndex,
+                                                          const OutputOfParsedTfOperation& otherOp,
+                                                          armnn::IOutputSlot** outputOfLeakyRelu,
+                                                          armnn::ActivationDescriptor & desc)
 {
     const tensorflow::NodeDef& otherNodeDef = otherOp.m_IndexedValue->GetNode();
 
@@ -1709,8 +1757,8 @@
     return false;
 }
 
-ParsedTfOperationPtr TfParser::ParseMaximum(const tensorflow::NodeDef& nodeDef,
-                                            const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMaximum(const tensorflow::NodeDef& nodeDef,
+                                                           const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -1757,7 +1805,7 @@
     }
 }
 
-std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> TfParser::ProcessElementwiseInputSlots(
+std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> ITfParser::TfParserImpl::ProcessElementwiseInputSlots(
             const tensorflow::NodeDef& nodeDef, const std::string& layerName)
 {
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -1791,7 +1839,7 @@
     return {input0Slot, input1Slot};
 }
 
-ParsedTfOperationPtr TfParser::ProcessComparisonLayer(
+ParsedTfOperationPtr ITfParser::TfParserImpl::ProcessComparisonLayer(
     IOutputSlot* input0Slot,
     IOutputSlot* input1Slot,
     IConnectableLayer* const layer,
@@ -1818,7 +1866,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ProcessElementwiseLayer(
+ParsedTfOperationPtr ITfParser::TfParserImpl::ProcessElementwiseLayer(
         IOutputSlot* input0Slot,
         IOutputSlot* input1Slot,
         IConnectableLayer* const layer,
@@ -1844,8 +1892,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseGather(const tensorflow::NodeDef& nodeDef,
-                                           const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGather(const tensorflow::NodeDef& nodeDef,
+                                                          const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -1883,8 +1931,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseGreater(const tensorflow::NodeDef& nodeDef,
-                                            const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseGreater(const tensorflow::NodeDef& nodeDef,
+                                                           const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Greater");
@@ -1897,8 +1945,8 @@
     return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
 }
 
-ParsedTfOperationPtr TfParser::ParseEqual(const tensorflow::NodeDef& nodeDef,
-                                          const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseEqual(const tensorflow::NodeDef& nodeDef,
+                                                         const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Equal");
@@ -1911,8 +1959,8 @@
     return ProcessComparisonLayer(input0Slot, input1Slot, layer, nodeDef);
 }
 
-ParsedTfOperationPtr TfParser::ParseMinimum(const tensorflow::NodeDef& nodeDef,
-                                            const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMinimum(const tensorflow::NodeDef& nodeDef,
+                                                           const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::pair<armnn::IOutputSlot*, armnn::IOutputSlot*> inputLayers = ProcessElementwiseInputSlots(nodeDef, "Minimum");
@@ -1924,7 +1972,8 @@
     return ProcessElementwiseLayer(input0Slot, input1Slot, layer, nodeDef);
 }
 
-ParsedTfOperationPtr TfParser::ParseSub(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSub(const tensorflow::NodeDef& nodeDef,
+                                                       const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -1964,7 +2013,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseStack(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseStack(const tensorflow::NodeDef& nodeDef,
+                                                         const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
@@ -2049,7 +2099,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseTranspose(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseTranspose(const tensorflow::NodeDef& nodeDef,
+                                                             const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
 
@@ -2142,8 +2193,8 @@
     return paddedTensorInfo;
 }
 
-ParsedTfOperationPtr TfParser::ParsePad(const tensorflow::NodeDef& nodeDef,
-                                        const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePad(const tensorflow::NodeDef& nodeDef,
+                                                       const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     // input consists of:
@@ -2213,8 +2264,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseConcat(const tensorflow::NodeDef& nodeDef,
-                                           const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseConcat(const tensorflow::NodeDef& nodeDef,
+                                                          const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfConstNodeDef> nodes = GetTfInputNodes(nodeDef);
@@ -2295,7 +2346,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseShape(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseShape(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2336,7 +2387,7 @@
                                                              shapeTensorInfo);
 }
 
-ParsedTfOperationPtr TfParser::ParseReshape(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseReshape(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2373,7 +2424,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseResizeBilinear(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2504,7 +2555,8 @@
     return outTensorInfo;
 }
 
-ParsedTfOperationPtr TfParser::ParseSqueeze(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSqueeze(const tensorflow::NodeDef& nodeDef,
+                                                           const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -2524,7 +2576,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseLrn(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseLrn(const tensorflow::NodeDef& nodeDef,
+                                                       const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -2558,7 +2611,7 @@
 class ParsedMatMulTfOperation : public DeferredSingleLayerParsedTfOperation
 {
 public:
-    ParsedMatMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
+    ParsedMatMulTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
         : DeferredSingleLayerParsedTfOperation(parser, node)
     {
     }
@@ -2570,7 +2623,8 @@
     }
 };
 
-ParsedTfOperationPtr TfParser::ParseMatMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMatMul(const tensorflow::NodeDef& nodeDef,
+                                                          const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
 
@@ -2578,7 +2632,8 @@
     return std::make_unique<ParsedMatMulTfOperation>(this, nodeDef);
 }
 
-ParsedTfOperationPtr TfParser::ParseMean(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMean(const tensorflow::NodeDef& nodeDef,
+                                                        const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
@@ -2641,7 +2696,7 @@
 class ParsedMulTfOperation : public DeferredSingleLayerParsedTfOperation
 {
 public:
-    ParsedMulTfOperation(TfParser* parser, const tensorflow::NodeDef& node)
+    ParsedMulTfOperation(ITfParser::TfParserImpl* parser, const tensorflow::NodeDef& node)
         : DeferredSingleLayerParsedTfOperation(parser, node)
     {
     }
@@ -2653,14 +2708,15 @@
     }
 };
 
-ParsedTfOperationPtr TfParser::ParseMul(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMul(const tensorflow::NodeDef& nodeDef,
+                                                       const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
 
     return std::make_unique<ParsedMulTfOperation>(this, nodeDef);
 }
 
-ParsedTfOperationPtr TfParser::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePlaceholder(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2688,13 +2744,14 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseRealDiv(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRealDiv(const tensorflow::NodeDef& nodeDef,
+                                                           const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
     return AddRealDivLayer(nodeDef);
 }
 
-ParsedTfOperationPtr TfParser::ParseRelu(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRelu(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2704,7 +2761,7 @@
     return AddActivationLayer(nodeDef, activationDesc);
 }
 
-ParsedTfOperationPtr TfParser::ParseRelu6(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRelu6(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2717,7 +2774,7 @@
     return AddActivationLayer(nodeDef, activationDesc);
 }
 
-ParsedTfOperationPtr TfParser::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSigmoid(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2728,7 +2785,7 @@
     return AddActivationLayer(nodeDef, activationDesc);
 }
 
-ParsedTfOperationPtr TfParser::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseRsqrt(const tensorflow::NodeDef &nodeDef,
     const tensorflow::GraphDef &graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2745,7 +2802,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSoftmax(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2762,7 +2819,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseSplit(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSplit(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2853,7 +2910,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseSoftplus(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
@@ -2864,8 +2921,8 @@
     return AddActivationLayer(nodeDef, activationDesc);
 }
 
-ParsedTfOperationPtr TfParser::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
-                                                 const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseStridedSlice(const tensorflow::NodeDef& nodeDef,
+                                                               const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
 
@@ -2912,7 +2969,8 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseTanh(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseTanh(const tensorflow::NodeDef& nodeDef,
+                                                        const tensorflow::GraphDef& graphDef)
 {
     IgnoreUnused(graphDef);
 
@@ -2924,7 +2982,7 @@
     return AddActivationLayer(nodeDef, activationDesc);
 }
 
-ParsedTfOperationPtr TfParser::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::AddActivationLayer(const tensorflow::NodeDef& nodeDef,
     ActivationDescriptor& activationDesc)
 {
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 1);
@@ -2937,19 +2995,19 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseMaxPool(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Max);
 }
 
-ParsedTfOperationPtr TfParser::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParseAvgPool(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef)
 {
     return ParsePooling2d(nodeDef, graphDef, PoolingAlgorithm::Average);
 }
 
-ParsedTfOperationPtr TfParser::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
+ParsedTfOperationPtr ITfParser::TfParserImpl::ParsePooling2d(const tensorflow::NodeDef& nodeDef,
     const tensorflow::GraphDef& graphDef, PoolingAlgorithm pooltype)
 {
     IgnoreUnused(graphDef);
@@ -3058,7 +3116,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
+ParsedTfOperationPtr ITfParser::TfParserImpl::AddAdditionLayer(const tensorflow::NodeDef& nodeDef, bool isBiasAdd)
 {
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
@@ -3138,7 +3196,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::AddRealDivLayer(const tensorflow::NodeDef& nodeDef)
 {
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
@@ -3176,7 +3234,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-ParsedTfOperationPtr TfParser::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
+ParsedTfOperationPtr ITfParser::TfParserImpl::AddMaximumLayer(const tensorflow::NodeDef& nodeDef)
 {
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
@@ -3219,7 +3277,7 @@
     return std::make_unique<SingleLayerParsedTfOperation>(this, nodeDef, layer);
 }
 
-IConnectableLayer* TfParser::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
+IConnectableLayer* ITfParser::TfParserImpl::AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef)
 {
     std::vector<OutputOfParsedTfOperation> inputs = GetInputParsedTfOperationsChecked(nodeDef, 2);
 
@@ -3255,7 +3313,7 @@
     return layer;
 }
 
-IConnectableLayer* TfParser::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
+IConnectableLayer* ITfParser::TfParserImpl::AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
     const tensorflow::NodeDef* addNodeDef, const char* armnnLayerName)
 {
     // Finds bias const (if applicable).
@@ -3353,7 +3411,7 @@
     return layer;
 }
 
-void TfParser::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
+void ITfParser::TfParserImpl::LoadNodeDef(const tensorflow::NodeDef& nodeDef, const tensorflow::GraphDef& graphDef)
 {
     // Gets the type of the node (assume float).
     tensorflow::DataType type = tensorflow::DT_FLOAT;
@@ -3426,7 +3484,7 @@
     }
 }
 
-void TfParser::LoadGraphDef(const tensorflow::GraphDef& graphDef)
+void ITfParser::TfParserImpl::LoadGraphDef(const tensorflow::GraphDef& graphDef)
 {
     // Adds all nodes to our map.
     m_NodesByName.clear();
@@ -3496,7 +3554,7 @@
     }
 }
 
-INetworkPtr TfParser::CreateNetworkFromTextFile(const char* graphFile,
+INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromTextFile(const char* graphFile,
     const std::map<std::string, TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -3527,7 +3585,7 @@
     return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
 }
 
-INetworkPtr TfParser::CreateNetworkFromString(const char* protoText,
+INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromString(const char* protoText,
     const std::map<std::string, TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -3545,7 +3603,7 @@
     return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
 }
 
-INetworkPtr TfParser::CreateNetworkFromBinaryFile(const char* graphFile,
+INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromBinaryFile(const char* graphFile,
     const std::map<std::string, TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -3579,7 +3637,7 @@
     return CreateNetworkFromGraphDef(graphDef, inputShapes, requestedOutputs);
 }
 
-INetworkPtr TfParser::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
+INetworkPtr ITfParser::TfParserImpl::CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
     const std::map<std::string, TensorShape>& inputShapes,
     const std::vector<std::string>& requestedOutputs)
 {
@@ -3609,7 +3667,7 @@
     return std::move(m_Network);
 }
 
-void TfParser::Cleanup()
+void ITfParser::TfParserImpl::Cleanup()
 {
     // Cleanup, in case we reuse this parser.
     m_InputShapes.clear();
@@ -3618,17 +3676,17 @@
     m_ParsedTfOperations.clear();
 }
 
-BindingPointInfo TfParser::GetNetworkInputBindingInfo(const std::string& name) const
+BindingPointInfo ITfParser::TfParserImpl::GetNetworkInputBindingInfo(const std::string& name) const
 {
     return GetBindingInfo(name, "input", m_NetworkInputsBindingInfo);
 }
 
-BindingPointInfo TfParser::GetNetworkOutputBindingInfo(const std::string& name) const
+BindingPointInfo ITfParser::TfParserImpl::GetNetworkOutputBindingInfo(const std::string& name) const
 {
     return GetBindingInfo(name, "output", m_NetworkOutputsBindingInfo);
 }
 
-std::pair<LayerBindingId, TensorInfo> TfParser::GetBindingInfo(const std::string& layerName,
+std::pair<LayerBindingId, TensorInfo> ITfParser::TfParserImpl::GetBindingInfo(const std::string& layerName,
     const char* bindingPointDesc,
     const std::unordered_map<std::string, BindingPointInfo>& nameToBindingInfo)
 {
@@ -3644,17 +3702,21 @@
     return it->second;
 }
 
-void TfParser::TrackInputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
+void ITfParser::TfParserImpl::TrackInputBinding(IConnectableLayer* layer,
+                                                LayerBindingId id,
+                                                const TensorInfo& tensorInfo)
 {
     return TrackBindingPoint(layer, id, tensorInfo, "input", m_NetworkInputsBindingInfo);
 }
 
-void TfParser::TrackOutputBinding(IConnectableLayer* layer, LayerBindingId id, const TensorInfo& tensorInfo)
+void ITfParser::TfParserImpl::TrackOutputBinding(IConnectableLayer* layer,
+                                                 LayerBindingId id,
+                                                 const TensorInfo& tensorInfo)
 {
     return TrackBindingPoint(layer, id, tensorInfo, "output", m_NetworkOutputsBindingInfo);
 }
 
-void TfParser::TrackBindingPoint(IConnectableLayer* layer,
+void ITfParser::TfParserImpl::TrackBindingPoint(IConnectableLayer* layer,
     LayerBindingId id,
     const TensorInfo& tensorInfo,
     const char* bindingPointDesc,
diff --git a/src/armnnTfParser/TfParser.hpp b/src/armnnTfParser/TfParser.hpp
index 94499ea..5c04cce 100644
--- a/src/armnnTfParser/TfParser.hpp
+++ b/src/armnnTfParser/TfParser.hpp
@@ -61,41 +61,38 @@
 using OutputOfConstNodeDef = WithOutputTensorIndex<const tensorflow::NodeDef*>;
 using OutputId = WithOutputTensorIndex<std::string>;
 
-class TfParser : public ITfParser
+struct ITfParser::TfParserImpl
 {
 public:
     /// Creates the network from a protobuf text file on the disk.
-    virtual armnn::INetworkPtr CreateNetworkFromTextFile(
+    armnn::INetworkPtr CreateNetworkFromTextFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) override;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Creates the network from a protobuf binary file on the disk.
-    virtual armnn::INetworkPtr CreateNetworkFromBinaryFile(
+    armnn::INetworkPtr CreateNetworkFromBinaryFile(
         const char* graphFile,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) override;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Creates the network directly from protobuf text in a string. Useful for debugging/testing.
-    virtual armnn::INetworkPtr CreateNetworkFromString(
+    armnn::INetworkPtr CreateNetworkFromString(
         const char* protoText,
         const std::map<std::string, armnn::TensorShape>& inputShapes,
-        const std::vector<std::string>& requestedOutputs) override;
+        const std::vector<std::string>& requestedOutputs);
 
     /// Retrieves binding info (layer id and tensor info) for the network input identified by the given layer name.
-    virtual BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const override;
+    BindingPointInfo GetNetworkInputBindingInfo(const std::string& name) const;
 
     /// Retrieves binding info (layer id and tensor info) for the network output identified by the given layer name.
-    virtual BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const override;
+    BindingPointInfo GetNetworkOutputBindingInfo(const std::string& name) const;
 
-public:
-    TfParser();
+    TfParserImpl();
+    ~TfParserImpl() = default;
 
-private:
-    template <typename T>
-    friend class ParsedConstTfOperation;
-    friend class ParsedMatMulTfOperation;
-    friend class ParsedMulTfOperation;
+    TfParserImpl(const TfParserImpl&) = delete;
+    TfParserImpl& operator=(const TfParserImpl&) = delete;
 
     /// Parses a GraphDef loaded into memory from one of the other CreateNetwork*.
     armnn::INetworkPtr CreateNetworkFromGraphDef(const tensorflow::GraphDef& graphDef,
@@ -177,7 +174,6 @@
     ParsedTfOperationPtr AddRealDivLayer(const tensorflow::NodeDef& nodeDef);
     ParsedTfOperationPtr AddMaximumLayer(const tensorflow::NodeDef& nodeDef);
 
-private:
     armnn::IConnectableLayer* AddMultiplicationLayer(const tensorflow::NodeDef& nodeDef);
 
     armnn::IConnectableLayer* AddFullyConnectedLayer(const tensorflow::NodeDef& matMulNodeDef,
@@ -251,8 +247,8 @@
     /// The network we're building. Gets cleared after it is passed to the user.
     armnn::INetworkPtr m_Network;
 
-    using OperationParsingFunction = ParsedTfOperationPtr(TfParser::*)(const tensorflow::NodeDef& nodeDef,
-                                                                 const tensorflow::GraphDef& graphDef);
+    using OperationParsingFunction = ParsedTfOperationPtr(TfParserImpl::*)(const tensorflow::NodeDef& nodeDef,
+                                                                           const tensorflow::GraphDef& graphDef);
 
     /// Map of TensorFlow operation names to parsing member functions.
     static const std::map<std::string, OperationParsingFunction> ms_OperationNameToParsingFunctions;