IVGCVSW-4873 Implement Pimpl Idiom for INetwork and IOptimizedNetwork

!android-nn-driver:5042

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: Ia1ce8b839e81b46428ba0f78463e085e5906958d
Signed-off-by: Francis Murtagh <francis.murtagh@arm.com>
Signed-off-by: Finn Williams <Finn.Williams@arm.com>
diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index c667d9c..98c9f3f 100644
--- a/include/armnn/INetwork.hpp
+++ b/include/armnn/INetwork.hpp
@@ -106,529 +106,6 @@
     ~IConnectableLayer() {}
 };
 
-using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
-
-/// Main network class which provides the interface for building up a neural network.
-/// This object is subsequently required by the IRuntime::Load() method.
-class INetwork
-{
-public:
-    static INetwork* CreateRaw(NetworkOptions networkOptions = {});
-    static INetworkPtr Create(NetworkOptions networkOptions = {});
-    static void Destroy(INetwork* network);
-
-    virtual Status PrintGraph() = 0;
-
-    /// Adds an input layer to the network.
-    /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified.
-    /// when passing the inputs to the IRuntime::EnqueueWorkload() function.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0;
-
-    /// Adds an ArgMinMax layer to the network.
-    /// @param desc - Parameters for the L2 normalization operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
-                                                 const char* name = nullptr) = 0;
-
-    /// Add a Comparison layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @param desc - Descriptor for the comparison operation.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
-                                                  const char* name = nullptr) = 0;
-
-    /// Adds a concatenation layer to the network.
-    /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
-    ///                           process. Number of Views must be equal to the number of inputs, and their order
-    ///                           must match - e.g. first view corresponds to the first input, second view to the
-    ///                           second input, etc....
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
-                                              const char* name = nullptr) = 0;
-
-    /// Adds a 2D convolution layer to the network.
-    /// @param convolution2dDescriptor - Description of the 2D convolution layer.
-    /// @param weights - Tensor for the weights data.
-    /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
-                                                     const ConstTensor& weights,
-                                                     const Optional<ConstTensor>& biases,
-                                                     const char* name = nullptr) = 0;
-
-    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
-    virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
-                                                     const ConstTensor& weights,
-                                                     const char* name = nullptr) = 0;
-
-    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
-    virtual IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
-                                                     const ConstTensor& weights,
-                                                     const ConstTensor& biases,
-                                                     const char* name = nullptr) = 0;
-
-    /// Adds a depth to space layer to the network.
-    /// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
-                                                    const char* name = nullptr) = 0;
-
-    /// Adds a 2D depthwise convolution layer to the network.
-    /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
-    /// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
-    /// @param biases Optional tensor for the bias data. If specified, must match the output tensor shape.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const Optional<ConstTensor>& biases,
-        const char* name = nullptr) = 0;
-
-    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
-    virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const char* name = nullptr) = 0;
-
-    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
-    virtual IConnectableLayer* AddDepthwiseConvolution2dLayer(
-        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
-        const ConstTensor& weights,
-        const ConstTensor& biases,
-        const char* name = nullptr) = 0;
-
-    /// Adds a Dequantize layer to the network.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddDequantizeLayer(const char* name = nullptr) = 0;
-
-    /// Adds a Detection PostProcess layer to the network.
-    /// @param descriptor - Description of the Detection PostProcess layer.
-    /// @param anchors - Tensor for anchors.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddDetectionPostProcessLayer(
-        const DetectionPostProcessDescriptor& descriptor,
-        const ConstTensor& anchors,
-        const char* name = nullptr) = 0;
-
-    /// Add an ElementwiseUnary layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @param desc - Descriptor for the elementwiseUnary operation.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
-                                                        const char* name = nullptr) = 0;
-
-    /// Add an Fill layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @param fillDescriptor - Descriptor for the fill operation.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
-                                            const char* name = nullptr) = 0;
-
-    /// Adds a fully connected layer to the network.
-    /// @param fullyConnectedDescriptor - Description of the fully connected layer.
-    /// @param weights - Tensor for the weights data.
-    /// @param biases - Optional tensor for the bias data.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                                      const ConstTensor& weights,
-                                                      const Optional<ConstTensor>& biases,
-                                                      const char* name = nullptr) = 0;
-
-    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
-    virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                                      const ConstTensor& weights,
-                                                      const char* name = nullptr) = 0;
-
-    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
-    virtual IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
-                                                      const ConstTensor& weights,
-                                                      const ConstTensor& biases,
-                                                      const char* name = nullptr) = 0;
-
-    /// Adds a permute layer to the network.
-    /// @param permuteDescriptor - PermuteDescriptor to configure the permute.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
-                                               const char* name = nullptr) = 0;
-
-    /// Adds a batch to space ND layer to the network.
-    /// @param batchToSpaceNdDescriptor - Description of the layer.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                                      const char* name = nullptr) = 0;
-
-    /// Adds a pooling layer to the network.
-    /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
-        const char* name = nullptr) = 0;
-
-    /// Adds an activation layer to the network.
-    /// @param activationDescriptor - ActivationDescriptor to configure the activation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
-        const char* name = nullptr) = 0;
-
-    /// Adds a normalization layer to the network.
-    /// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
-        const char* name = nullptr) = 0;
-
-    /// Adds a slice layer to the network.
-    /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr) = 0;
-
-    /// Adds a softmax layer to the network.
-    /// If the data type is QAsymm8, then the output quantization parameters
-    /// must have a scale of 1/256 and an offset of 0
-    /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
-        const char* name = nullptr) = 0;
-
-    /// Adds a splitter layer to the network.
-    /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
-    ///                             Number of Views must be equal to the number of outputs,
-    ///                             and their order must match - e.g. first view corresponds to
-    ///                             the first output, second view to the second output, etc....
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
-                                                const char* name = nullptr) = 0;
-
-    /// Adds a merge layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddMergeLayer(const char* name = nullptr) = 0;
-
-    /// Adds a concat layer to the network.
-    /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
-    ///                           process. Number of Views must be equal to the number of inputs, and their order
-    ///                           must match - e.g. first view corresponds to the first input, second view to the
-    ///                           second input, etc....
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
-    virtual IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
-        const char* name = nullptr) = 0;
-
-    /// Add absolute layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    virtual IConnectableLayer* AddAbsLayer(const char* name = nullptr) = 0;
-
-    /// Adds an addition layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddAdditionLayer(const char* name = nullptr) = 0;
-
-    /// Adds a multiplication layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr) = 0;
-
-    /// Adds a batch normalization layer to the network.
-    /// @param mean - Pre-calculated mean for each channel.
-    /// @param variance - Pre-calculated variance for each channel.
-    /// @param beta - Per-channel additive factor.
-    /// @param gamma - Per-channel multiplicative factor.
-    /// @return - Interface for configuring the layer.
-    /// @param name - Optional name for the layer.
-    virtual IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
-        const ConstTensor& mean,
-        const ConstTensor& variance,
-        const ConstTensor& beta,
-        const ConstTensor& gamma,
-        const char* name = nullptr) = 0;
-
-    /// Adds a rank layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddRankLayer(const char* name = nullptr) = 0;
-
-    /// Adds a resize bilinear layer to the network.
-    /// @param resizeDesc - Parameters for the resize operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
-    virtual IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
-                                                      const char* name = nullptr) = 0;
-
-    /// Adds a resize layer to the network.
-    /// @param resizeDescriptor - Parameters for the resize operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
-                                              const char* name = nullptr) = 0;
-
-    /// Adds a reduce layer to the network.
-    /// @param ReduceDescriptor - Parameters for the reduce operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
-                                              const char* name = nullptr) = 0;
-
-    /// Adds an instance normalization layer to the network.
-    /// @param desc - Parameters for the instance normalization operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
-                                                             const char* name = nullptr) = 0;
-
-    /// Adds an L2 normalization layer to the network.
-    /// Normalization is performed along dimension 1, but requires a 4d input.
-    /// @param desc - Parameters for the L2 normalization operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
-                                                       const char* name = nullptr) = 0;
-
-    /// Adds a log softmax layer to the network.
-    /// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                                                  const char* name = nullptr) = 0;
-
-    /// Adds a layer with no inputs and a single output, which always corresponds to
-    /// the passed in constant tensor.
-    /// @param input - Tensor to be provided as the only output of the layer. The layer will maintain
-    ///                its own copy of the tensor data, meaning the memory referenced by @a input can
-    ///                be freed or reused after this function is called.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddConstantLayer(const ConstTensor& input,
-                                                const char* name = nullptr) = 0;
-
-    /// Adds a reshape layer to the network.
-    /// @param reshapeDescriptor - Parameters for the reshape operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
-                                               const char* name = nullptr) = 0;
-
-    /// Adds a space to batch layer to the network.
-    /// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                                      const char* name = nullptr) = 0;
-
-    /// Adds a space to depth layer to the network.
-    /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                                    const char* name = nullptr) = 0;
-
-    /// Adds a floor layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddFloorLayer(const char* name = nullptr) = 0;
-
-    /// Adds an output layer to the network.
-    /// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
-    /// when passing the outputs to the IRuntime::EnqueueWorkload() function.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) = 0;
-
-    /// Add a Lstm layer to the network
-    /// @param descriptor - Parameters for the Lstm operation
-    /// @param params - Weights and biases for the LSTM cell
-    /// @param name - Optional name for the layer
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
-                                            const LstmInputParams& params,
-                                            const char* name = nullptr) = 0;
-
-    /// Adds a division layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddDivisionLayer(const char* name = nullptr) = 0;
-
-    /// Adds a subtraction layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) = 0;
-
-    /// Add a Maximum layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddMaximumLayer(const char* name = nullptr) = 0;
-
-    /// Add a Mean layer to the network.
-    /// @param meanDescriptor - Parameters for the mean operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) = 0;
-
-    /// Adds a fully pad layer to the network.
-    /// @param paddings - n by 2 tensor, where n is the rank of the input tensor,
-    ///                   such that paddings[i,0] indicates the amount of padding to add in front of dimonsion i, and
-    ///                   paddings[i,1] indicates the amount of padding to add after the end of dimension i
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
-                                           const char* name = nullptr) = 0;
-
-    /// Add a quantize layer to the network
-    ///@param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) = 0;
-
-    /// Adds a strided slice layer to the network.
-    /// @param StridedSliceDescriptor - Parameters for the strided slice operation.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
-                                                    const char* name = nullptr) = 0;
-
-    /// Add a Minimum layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddMinimumLayer(const char* name = nullptr) = 0;
-
-    /// Add a Greater layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    virtual IConnectableLayer* AddGreaterLayer(const char* name = nullptr) = 0;
-
-    /// Add a Equal layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    virtual IConnectableLayer* AddEqualLayer(const char* name = nullptr) = 0;
-
-    /// Add Reciprocal of square root layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    virtual IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) = 0;
-
-    /// Add Gather layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    ARMNN_DEPRECATED_MSG("Use AddGatherLayer with descriptor instead")
-    virtual IConnectableLayer* AddGatherLayer(const char* name = nullptr) = 0;
-
-    /// Add Gather layer to the network.
-    /// @param descriptor - Description of the gather layer.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddGatherLayer(const GatherDescriptor& descriptor,
-                                              const char* name = nullptr) = 0;
-
-    /// Adds a switch layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddSwitchLayer(const char* name = nullptr) = 0;
-
-    /// Adds a PReLU layer to the network.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddPreluLayer(const char* name = nullptr) = 0;
-
-    /// Adds a 2D transpose convolution layer to the network.
-    /// @param descriptor - Description of the 2D transpose convolution layer.
-    /// @param weights - Tensor for the weights data.
-    /// @param biases - Optional tensor for the bias data.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
-                                                              const ConstTensor& weights,
-                                                              const Optional<ConstTensor>& biases,
-                                                              const char* name = nullptr) = 0;
-
-    /// Adds a transpose layer to the network.
-    /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
-                                                 const char* name = nullptr) = 0;
-
-    /// Adds a stack layer to the network.
-    /// @param descriptor - Description of the stack layer.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
-                                             const char* name = nullptr) = 0;
-
-    /// Add a stand-in layer for a type unknown to the Arm NN framework.
-    /// Note: Due to the nature of this layer, no validation can be performed by the framework.
-    /// Furthermore, Any model containing this layer cannot make use of dynamic tensors since the
-    /// tensor sizes cannot be inferred.
-    /// @descriptor - Descriptor for the StandIn layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
-                                               const char* name = nullptr) = 0;
-
-    /// Add a QuantizedLstm layer to the network
-    /// @param params - The weights and biases for the Quantized LSTM cell
-    /// @param name - Optional name for the layer
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
-                                                     const char* name = nullptr) = 0;
-
-    /// Add a QLstm layer to the network
-    /// @param descriptor - Parameters for the QLstm operation
-    /// @param params - Weights and biases for the layer
-    /// @param name - Optional name for the layer
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
-                                             const LstmInputParams& params,
-                                             const char* name = nullptr) = 0;
-
-    /// Adds a Logical Binary layer to the network.
-    /// @param descriptor - Description of the Logical Binary layer.
-    /// @param name - Optional name for the layer.
-    /// @return - Interface for configuring the layer.
-    virtual IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
-                                                     const char* name = nullptr) = 0;
-
-    virtual void Accept(ILayerVisitor& visitor) const = 0;
-
-    virtual void ExecuteStrategy(IStrategy& strategy) const = 0;
-
-protected:
-    ~INetwork() {}
-};
-
-using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>;
-
-class IOptimizedNetwork
-{
-public:
-    static void Destroy(IOptimizedNetwork* network);
-
-    virtual Status PrintGraph() = 0;
-    virtual Status SerializeToDot(std::ostream& stream) const = 0;
-
-    virtual profiling::ProfilingGuid GetGuid() const = 0;
-
-protected:
-    ~IOptimizedNetwork() {}
-};
 
 struct OptimizerOptions
 {
@@ -642,7 +119,7 @@
     {}
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16, bool importEnabled,
-        ModelOptions modelOptions = {})
+                     ModelOptions modelOptions = {})
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
@@ -691,6 +168,569 @@
     ModelOptions m_ModelOptions;
 };
 
+class IWorkloadFactory;
+class NetworkImpl;
+using INetworkPtr = std::unique_ptr<INetwork, void(*)(INetwork* network)>;
+using IOptimizedNetworkPtr = std::unique_ptr<IOptimizedNetwork, void(*)(IOptimizedNetwork* network)>;
+
+/// Main network class which provides the interface for building up a neural network.
+/// This object is subsequently required by the IRuntime::Load() method.
+class INetwork
+{
+public:
+    static INetwork* CreateRaw(NetworkOptions networkOptions = {});
+    static INetworkPtr Create(NetworkOptions networkOptions = {});
+    static void Destroy(INetwork* network);
+
+    Status PrintGraph();
+
+    /// Adds an input layer to the network.
+    /// @param id - User generated id to uniquely identify a particular input. The same id needs to be specified
+    /// when passing the inputs to the IRuntime::EnqueueWorkload() function.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr);
+
+    /// Adds an ArgMinMax layer to the network.
+    /// @param desc - Parameters for the ArgMinMax operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+                                         const char* name = nullptr);
+
+    /// Add a Comparison layer to the network.
+    /// @param comparisonDescriptor - Descriptor for the comparison operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
+                                          const char* name = nullptr);
+
+    /// Adds a concatenation layer to the network.
+    /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
+    ///                           process. Number of Views must be equal to the number of inputs, and their order
+    ///                           must match - e.g. first view corresponds to the first input, second view to the
+    ///                           second input, etc....
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
+                                      const char* name = nullptr);
+
+    /// Adds a 2D convolution layer to the network.
+    /// @param convolution2dDescriptor - Description of the 2D convolution layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data. If specified, must match the output tensor shape.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+                                             const ConstTensor& weights,
+                                             const Optional<ConstTensor>& biases,
+                                             const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+                                             const ConstTensor& weights,
+                                             const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
+    IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+                                             const ConstTensor& weights,
+                                             const ConstTensor& biases,
+                                             const char* name = nullptr);
+
+    /// Adds a depth to space layer to the network.
+    /// @param depthToSpaceDescriptor - Parameters for the depth to space operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
+                                            const char* name = nullptr);
+
+    /// Adds a 2D depthwise convolution layer to the network.
+    /// @param convolution2dDescriptor - Description of the 2D depthwise convolution layer.
+    /// @param weights - Tensor for the weights. Expected format: [channelMultiplier, inputChannels, height, width].
+    /// @param biases Optional tensor for the bias data. If specified, must match the output tensor shape.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddDepthwiseConvolution2dLayer(
+        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+        const ConstTensor& weights,
+        const Optional<ConstTensor>& biases,
+        const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
+    IConnectableLayer* AddDepthwiseConvolution2dLayer(
+        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+        const ConstTensor& weights,
+        const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
+    IConnectableLayer* AddDepthwiseConvolution2dLayer(
+        const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+        const ConstTensor& weights,
+        const ConstTensor& biases,
+        const char* name = nullptr);
+
+    /// Adds a Dequantize layer to the network.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
+
+    /// Adds a Detection PostProcess layer to the network.
+    /// @param descriptor - Description of the Detection PostProcess layer.
+    /// @param anchors - Tensor for anchors.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddDetectionPostProcessLayer(
+        const DetectionPostProcessDescriptor& descriptor,
+        const ConstTensor& anchors,
+        const char* name = nullptr);
+
+    /// Add an ElementwiseUnary layer to the network.
+    /// @param elementwiseUnaryDescriptor - Descriptor for the elementwiseUnary operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                                const char* name = nullptr);
+
+    /// Add an Fill layer to the network.
+    /// @param fillDescriptor - Descriptor for the fill operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
+                                    const char* name = nullptr);
+
+    /// Adds a fully connected layer to the network.
+    /// @param fullyConnectedDescriptor - Description of the fully connected layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                              const ConstTensor& weights,
+                                              const Optional<ConstTensor>& biases,
+                                              const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
+    IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                              const ConstTensor& weights,
+                                              const char* name = nullptr);
+
+    ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
+    IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                              const ConstTensor& weights,
+                                              const ConstTensor& biases,
+                                              const char* name = nullptr);
+
+    /// Adds a permute layer to the network.
+    /// @param permuteDescriptor - PermuteDescriptor to configure the permute.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
+                                       const char* name = nullptr);
+
+    /// Adds a batch to space ND layer to the network.
+    /// @param batchToSpaceNdDescriptor - Description of the layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+                                              const char* name = nullptr);
+
+    /// Adds a pooling layer to the network.
+    /// @param pooling2dDescriptor - Pooling2dDescriptor to configure the pooling.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
+        const char* name = nullptr);
+
+    /// Adds an activation layer to the network.
+    /// @param activationDescriptor - ActivationDescriptor to configure the activation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+        const char* name = nullptr);
+
+    /// Adds a normalization layer to the network.
+    /// @param normalizationDescriptor - NormalizationDescriptor to configure the normalization.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
+        const char* name = nullptr);
+
+    /// Adds a slice layer to the network.
+    /// @param sliceDescriptor - SliceDescriptor to configure the slice operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
+
+    /// Adds a softmax layer to the network.
+    /// If the data type is QAsymm8, then the output quantization parameters
+    /// must have a scale of 1/256 and an offset of 0
+    /// @param softmaxDescriptor - SoftmaxDescriptor to configure the softmax.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
+        const char* name = nullptr);
+
+    /// Adds a splitter layer to the network.
+    /// @param splitterDescriptor - ViewsDescriptor to configure the splitting process.
+    ///                             Number of Views must be equal to the number of outputs,
+    ///                             and their order must match - e.g. first view corresponds to
+    ///                             the first output, second view to the second output, etc....
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+                                        const char* name = nullptr);
+
+    /// Adds a merge layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddMergeLayer(const char* name = nullptr);
+
+    /// Adds a concat layer to the network.
+    /// @param mergerDescriptor - MergerDescriptor (synonym for OriginsDescriptor) to configure the concatenation
+    ///                           process. Number of Views must be equal to the number of inputs, and their order
+    ///                           must match - e.g. first view corresponds to the first input, second view to the
+    ///                           second input, etc....
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
+    IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
+        const char* name = nullptr);
+
+    /// Add absolute layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
+    IConnectableLayer* AddAbsLayer(const char* name = nullptr);
+
+    /// Adds an addition layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
+
+    /// Adds a multiplication layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
+
+    /// Adds a batch normalization layer to the network.
+    /// @param mean - Pre-calculated mean for each channel.
+    /// @param variance - Pre-calculated variance for each channel.
+    /// @param beta - Per-channel additive factor.
+    /// @param gamma - Per-channel multiplicative factor.
+    /// @return - Interface for configuring the layer.
+    /// @param name - Optional name for the layer.
+    IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
+        const ConstTensor& mean,
+        const ConstTensor& variance,
+        const ConstTensor& beta,
+        const ConstTensor& gamma,
+        const char* name = nullptr);
+
+    /// Adds a rank layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddRankLayer(const char* name = nullptr);
+
+    /// Adds a resize bilinear layer to the network.
+    /// @param resizeDesc - Parameters for the resize operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
+    IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
+                                              const char* name = nullptr);
+
+    /// Adds a resize layer to the network.
+    /// @param resizeDescriptor - Parameters for the resize operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+                                      const char* name = nullptr);
+
+    /// Adds a reduce layer to the network.
+    /// @param reduceDescriptor - Parameters for the reduce operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+                                      const char* name = nullptr);
+
+    /// Adds an instance normalization layer to the network.
+    /// @param desc - Parameters for the instance normalization operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
+                                                     const char* name = nullptr);
+
+    /// Adds an L2 normalization layer to the network.
+    /// Normalization is performed along dimension 1, but requires a 4d input.
+    /// @param desc - Parameters for the L2 normalization operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+                                               const char* name = nullptr);
+
+    /// Adds a log softmax layer to the network.
+    /// @param logSoftmaxDescriptor - LogSoftmaxDescriptor to configure the log softmax.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
+                                          const char* name = nullptr);
+
+    /// Adds a layer with no inputs and a single output, which always corresponds to
+    /// the passed in constant tensor.
+    /// @param input - Tensor to be provided as the only output of the layer. The layer will maintain
+    ///                its own copy of the tensor data, meaning the memory referenced by @a input can
+    ///                be freed or reused after this function is called.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddConstantLayer(const ConstTensor& input,
+                                        const char* name = nullptr);
+
+    /// Adds a reshape layer to the network.
+    /// @param reshapeDescriptor - Parameters for the reshape operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
+                                       const char* name = nullptr);
+
+    /// Adds a space to batch layer to the network.
+    /// @param spaceToBatchNdDescriptor - Parameters for the space to batch operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+                                              const char* name = nullptr);
+
+    /// Adds a space to depth layer to the network.
+    /// @param spaceToDepthDescriptor - Parameters for the space to depth operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                            const char* name = nullptr);
+
+    /// Adds a floor layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddFloorLayer(const char* name = nullptr);
+
+    /// Adds an output layer to the network.
+    /// @param id - User generated id to uniquely identify a particular output. The same id needs to be specified
+    /// when passing the outputs to the IRuntime::EnqueueWorkload() function.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr);
+
+    /// Add a Lstm layer to the network
+    /// @param descriptor - Parameters for the Lstm operation
+    /// @param params - Weights and biases for the LSTM cell
+    /// @param name - Optional name for the layer
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
+                                    const LstmInputParams& params,
+                                    const char* name = nullptr);
+
+    /// Adds a division layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
+
+    /// Adds a subtraction layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
+
+    /// Add a Maximum layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
+
+    /// Add a Mean layer to the network.
+    /// @param meanDescriptor - Parameters for the mean operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
+
+    /// Adds a pad layer to the network.
+    /// @param padDescriptor - n by 2 tensor, where n is the rank of the input tensor,
+    ///                        such that paddings[i,0] indicates the amount of padding to add in front of dimension i,
+    ///                        and paddings[i,1] indicates the amount of padding to add after the end of dimension i.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor,
+                                           const char* name = nullptr);
+
+    /// Add a quantize layer to the network
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
+
+    /// Adds a strided slice layer to the network.
+    /// @param stridedSliceDescriptor - Parameters for the strided slice operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+                                                    const char* name = nullptr);
+
+    /// Add a Minimum layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
+
+    /// Add a Greater layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
+    IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
+
+    /// Add a Equal layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
+    IConnectableLayer* AddEqualLayer(const char* name = nullptr);
+
+    /// Add Reciprocal of square root layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
+    IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
+
+    /// Add Gather layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    ARMNN_DEPRECATED_MSG("Use AddGatherLayer with descriptor instead")
+    IConnectableLayer* AddGatherLayer(const char* name = nullptr);
+
+    /// Add Gather layer to the network.
+    /// @param descriptor - Description of the gather layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddGatherLayer(const GatherDescriptor& descriptor,
+                                              const char* name = nullptr);
+
+    /// Adds a switch layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
+
+    /// Adds a PReLU layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddPreluLayer(const char* name = nullptr);
+
+    /// Adds a 2D transpose convolution layer to the network.
+    /// @param descriptor - Description of the 2D transpose convolution layer.
+    /// @param weights - Tensor for the weights data.
+    /// @param biases - Optional tensor for the bias data.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                              const ConstTensor& weights,
+                                                              const Optional<ConstTensor>& biases,
+                                                              const char* name = nullptr);
+
+    /// Adds a transpose layer to the network.
+    /// @param transposeDescriptor - TransposeDescriptor to configure the transpose.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+                                                 const char* name = nullptr);
+
+    /// Adds a stack layer to the network.
+    /// @param descriptor - Description of the stack layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddStackLayer(const StackDescriptor& descriptor,
+                                             const char* name = nullptr);
+
+    /// Add a stand-in layer for a type unknown to the Arm NN framework.
+    /// Note: Due to the nature of this layer, no validation can be performed by the framework.
+    /// Furthermore, Any model containing this layer cannot make use of dynamic tensors since the
+    /// tensor sizes cannot be inferred.
+    /// @param descriptor - Descriptor for the StandIn layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
+                                               const char* name = nullptr);
+
+    /// Add a QuantizedLstm layer to the network
+    /// @param params - The weights and biases for the Quantized LSTM cell
+    /// @param name - Optional name for the layer
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+                                                     const char* name = nullptr);
+
+    /// Add a QLstm layer to the network
+    /// @param descriptor - Parameters for the QLstm operation
+    /// @param params - Weights and biases for the layer
+    /// @param name - Optional name for the layer
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
+                                             const LstmInputParams& params,
+                                             const char* name = nullptr);
+
+    /// Adds a Logical Binary layer to the network.
+    /// @param descriptor - Description of the Logical Binary layer.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
+                                                     const char* name = nullptr);
+
+    void Accept(ILayerVisitor& visitor) const;
+
+    void ExecuteStrategy(IStrategy& strategy) const;
+
+protected:
+    ~INetwork();
+
+    friend class NetworkQuantizer;
+    friend void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy);
+    friend class TestConnectionPreservation;
+    friend TensorInfo GetInputTensorInfo(const INetwork* network);
+    friend IOptimizedNetworkPtr Optimize(const INetwork& network,
+                                         const std::vector<BackendId>& backendPreferences,
+                                         const IDeviceSpec& deviceSpec,
+                                         const OptimizerOptions& options,
+                                         Optional<std::vector<std::string>&> messages);
+
+    INetwork(NetworkOptions networkOptions = {});
+
+    std::unique_ptr<NetworkImpl> pNetworkImpl;
+};
+
+struct BackendSettings;
+struct OptimizationResult;
+class OptimizedNetworkImpl;
+class IOptimizedNetwork
+{
+public:
+    static void Destroy(IOptimizedNetwork* network);
+
+    Status PrintGraph();
+    Status SerializeToDot(std::ostream& stream) const;
+
+    profiling::ProfilingGuid GetGuid() const;
+
+    IOptimizedNetwork(std::unique_ptr<Graph> graph);
+    IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl);
+    ~IOptimizedNetwork();
+
+protected:
+    friend class LoadedNetwork;
+    friend Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
+    friend ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
+    friend IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
+                                         const std::vector<BackendId>& backendPreferences,
+                                         const IDeviceSpec& deviceSpec,
+                                         const OptimizerOptions& options,
+                                         Optional<std::vector<std::string>&> messages);
+
+    template <typename PreCompiledWorkload, armnn::DataType dataType>
+    friend std::pair<armnn::IOptimizedNetworkPtr, std::unique_ptr<PreCompiledWorkload>> CreatePreCompiledWorkloadTest(
+        armnn::IWorkloadFactory& factory,
+        armnn::Graph& graph,
+        bool biasEnabled);
+
+    IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
+
+    std::unique_ptr<OptimizedNetworkImpl> pOptimizedNetworkImpl;
+};
+
 /// Create an optimized version of the network
 /// @param network INetwork description of the network to be optimized.
 /// @param backendPreferences The choice of the backend ordered by user preferences.
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 4a307e2..ea09231 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -80,7 +80,7 @@
 
 } // anonymous
 
-std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+std::unique_ptr<LoadedNetwork> LoadedNetwork::MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                                                                 std::string& errorMessage,
                                                                 const INetworkProperties& networkProperties,
                                                                 profiling::ProfilingService&  profilingService)
@@ -115,7 +115,7 @@
     return loadedNetwork;
 }
 
-LoadedNetwork::LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+LoadedNetwork::LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                              const INetworkProperties& networkProperties,
                              profiling::ProfilingService&  profilingService) :
                              m_OptimizedNetwork(std::move(net)),
@@ -128,7 +128,7 @@
     m_Profiler = std::make_shared<IProfiler>();
     ProfilerManager::GetInstance().RegisterProfiler(m_Profiler.get());
 
-    Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
+    Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
     //First create tensor handlers, backends and workload factories.
     //Handlers are created before workloads are.
     //Because workload creation can modify some of the handlers,
@@ -146,7 +146,7 @@
             if (backend->SupportsTensorAllocatorAPI())
             {
                 auto workloadFactory = backend->CreateWorkloadFactory(
-                    m_TensorHandleFactoryRegistry, m_OptimizedNetwork->GetModelOptions());
+                    m_TensorHandleFactoryRegistry, m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions());
                 m_WorkloadFactories.emplace(
                     std::make_pair(backendId, std::make_pair(std::move(workloadFactory), nullptr)));
             }
@@ -154,7 +154,7 @@
             {
                 IBackendInternal::IMemoryManagerSharedPtr memoryManager = backend->CreateMemoryManager();
                 auto workloadFactory = backend->CreateWorkloadFactory(
-                    memoryManager, m_OptimizedNetwork->GetModelOptions());
+                    memoryManager, m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions());
 
                 m_WorkloadFactories.emplace(
                     std::make_pair(backendId, std::make_pair(std::move(workloadFactory), memoryManager)));
@@ -267,7 +267,7 @@
     }
 
     // Set up memory.
-    m_OptimizedNetwork->GetGraph().AllocateDynamicBuffers();
+    m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().AllocateDynamicBuffers();
 
     // Now that the intermediate tensor memory has been set-up, do any post allocation configuration for each workload.
     for (auto& workload : m_WorkloadQueue)
@@ -278,7 +278,7 @@
 
 void LoadedNetwork::SendNetworkStructure()
 {
-    Graph& order = m_OptimizedNetwork->GetGraph().TopologicalSort();
+    Graph& order = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().TopologicalSort();
     ProfilingGuid networkGuid = m_OptimizedNetwork->GetGuid();
 
     std::unique_ptr<TimelineUtilityMethods> timelineUtils =
@@ -320,7 +320,7 @@
 
 TensorInfo LoadedNetwork::GetInputTensorInfo(LayerBindingId layerId) const
 {
-    for (auto&& inputLayer : m_OptimizedNetwork->GetGraph().GetInputLayers())
+    for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
     {
         ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
         if (inputLayer->GetBindingId() == layerId)
@@ -334,7 +334,7 @@
 
 TensorInfo LoadedNetwork::GetOutputTensorInfo(LayerBindingId layerId) const
 {
-    for (auto&& outputLayer : m_OptimizedNetwork->GetGraph().GetOutputLayers())
+    for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
     {
         ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
         ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
@@ -368,7 +368,7 @@
     ARMNN_ASSERT_MSG(IWorkloadFactory::IsLayerSupported(layer,
                                                         {},
                                                         reasonIfUnsupported,
-                                                        m_OptimizedNetwork->GetModelOptions()),
+                                                        m_OptimizedNetwork->pOptimizedNetworkImpl->GetModelOptions()),
         "Factory does not support layer");
     IgnoreUnused(reasonIfUnsupported);
     return *workloadFactory;
@@ -470,7 +470,7 @@
 Status LoadedNetwork::EnqueueWorkload(const InputTensors& inputTensors,
                                       const OutputTensors& outputTensors)
 {
-    const Graph& graph = m_OptimizedNetwork->GetGraph();
+    const Graph& graph = m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph();
 
     // Walk graph to determine the order of execution.
     if (graph.GetNumLayers() < 2)
diff --git a/src/armnn/LoadedNetwork.hpp b/src/armnn/LoadedNetwork.hpp
index 3a44206..c7dd37f 100644
--- a/src/armnn/LoadedNetwork.hpp
+++ b/src/armnn/LoadedNetwork.hpp
@@ -42,7 +42,7 @@
 
     Status EnqueueWorkload(const InputTensors& inputTensors, const OutputTensors& outputTensors);
 
-    static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+    static std::unique_ptr<LoadedNetwork> MakeLoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                                                             std::string & errorMessage,
                                                             const INetworkProperties& networkProperties,
                                                             profiling::ProfilingService& profilingService);
@@ -63,7 +63,7 @@
 private:
     void AllocateWorkingMemory(std::lock_guard<std::mutex>& lock);
 
-    LoadedNetwork(std::unique_ptr<OptimizedNetwork> net,
+    LoadedNetwork(std::unique_ptr<IOptimizedNetwork> net,
                   const INetworkProperties& networkProperties,
                   profiling::ProfilingService& profilingService);
 
@@ -87,7 +87,7 @@
     BackendPtrMap       m_Backends;
     WorkloadFactoryMap  m_WorkloadFactories;
 
-    std::unique_ptr<OptimizedNetwork> m_OptimizedNetwork;
+    std::unique_ptr<IOptimizedNetwork> m_OptimizedNetwork;
     WorkloadQueue m_InputQueue;
     WorkloadQueue m_WorkloadQueue;
     WorkloadQueue m_OutputQueue;
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index bf7a056..9373a6a 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -38,9 +38,473 @@
 namespace armnn
 {
 
+INetwork::INetwork(NetworkOptions networkOptions) : pNetworkImpl(new NetworkImpl(networkOptions)) {}
+
+INetwork::~INetwork() = default;
+
+Status INetwork::PrintGraph()
+{
+    return pNetworkImpl->PrintGraph();
+}
+
+IConnectableLayer* INetwork::AddInputLayer(LayerBindingId id, const char* name)
+{
+    return pNetworkImpl->AddInputLayer(id, name);
+}
+
+
+IConnectableLayer* INetwork::AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+                                               const char* name)
+{
+    return pNetworkImpl->AddArgMinMaxLayer(desc, name);
+}
+
+
+IConnectableLayer* INetwork::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
+                                                const char* name)
+{
+    return pNetworkImpl->AddComparisonLayer(comparisonDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
+                                            const char* name)
+{
+    return pNetworkImpl->AddConcatLayer(concatDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+                                                   const ConstTensor& weights,
+                                                   const Optional<ConstTensor>& biases,
+                                                   const char* name)
+{
+    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+                                                   const ConstTensor& weights,
+                                                   const char* name)
+{
+    Optional<ConstTensor> biases;
+    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+                                                   const ConstTensor& weights,
+                                                   const ConstTensor& biases,
+                                                   const char* name )
+{
+
+    return pNetworkImpl->AddConvolution2dLayer(convolution2dDescriptor,
+                                               weights,
+                                               armnn::Optional<ConstTensor>(biases),
+                                               name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
+                                                  const char* name)
+{
+    return pNetworkImpl->AddDepthToSpaceLayer(depthToSpaceDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
+    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+    const ConstTensor& weights,
+    const Optional<ConstTensor>& biases,
+    const char* name)
+{
+    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
+    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+    const ConstTensor& weights,
+    const char* name)
+{
+    Optional<ConstTensor> biases;
+    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights, biases, name);
+}
+
+
+IConnectableLayer* INetwork::AddDepthwiseConvolution2dLayer(
+    const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
+    const ConstTensor& weights,
+    const ConstTensor& biases,
+    const char* name)
+{
+    return pNetworkImpl->AddDepthwiseConvolution2dLayer(convolution2dDescriptor, weights,
+                                                        armnn::Optional<ConstTensor>(biases), name);
+}
+
+
+IConnectableLayer* INetwork::AddDequantizeLayer(const char* name)
+{
+    return pNetworkImpl->AddDequantizeLayer(name);
+}
+
+
+IConnectableLayer* INetwork::AddDetectionPostProcessLayer(
+    const DetectionPostProcessDescriptor& descriptor,
+    const ConstTensor& anchors,
+    const char* name)
+{
+    return pNetworkImpl->AddDetectionPostProcessLayer(descriptor, anchors, name);
+}
+
+
+IConnectableLayer* INetwork::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+                                                      const char* name)
+{
+    return pNetworkImpl->AddElementwiseUnaryLayer(elementwiseUnaryDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddFillLayer(const FillDescriptor& fillDescriptor,
+                                          const char* name)
+{
+    return pNetworkImpl->AddFillLayer(fillDescriptor, name);
+}
+
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                                    const ConstTensor& weights,
+                                                    const Optional<ConstTensor>& biases,
+                                                    const char* name)
+{
+    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
+}
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                                    const ConstTensor& weights,
+                                                    const char* name)
+{
+    Optional<ConstTensor> biases;
+    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights, biases, name);
+}
+
+IConnectableLayer* INetwork::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+                                                    const ConstTensor& weights,
+                                                    const ConstTensor& biases,
+                                                    const char* name)
+{
+    return pNetworkImpl->AddFullyConnectedLayer(fullyConnectedDescriptor, weights,
+                                                armnn::Optional<ConstTensor>(biases), name);
+}
+
+IConnectableLayer* INetwork::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
+                                             const char* name)
+{
+    return pNetworkImpl->AddPermuteLayer(permuteDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+                                                    const char* name)
+{
+    return pNetworkImpl->AddBatchToSpaceNdLayer(batchToSpaceNdDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
+                                               const char* name)
+{
+    return pNetworkImpl->AddPooling2dLayer(pooling2dDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+                                                const char* name)
+{
+    return pNetworkImpl->AddActivationLayer(activationDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
+                                                   const char* name)
+{
+    return pNetworkImpl->AddNormalizationLayer(normalizationDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
+{
+    return pNetworkImpl->AddSliceLayer(sliceDescriptor, name);
+}
+IConnectableLayer* INetwork::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
+                                             const char* name)
+{
+    return pNetworkImpl->AddSoftmaxLayer(softmaxDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+                                              const char* name)
+{
+    return pNetworkImpl->AddSplitterLayer(splitterDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddMergeLayer(const char* name)
+{
+    return pNetworkImpl->AddMergeLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
+                                            const char* name)
+{
+    return pNetworkImpl->AddConcatLayer(mergerDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddAbsLayer(const char* name)
+{
+    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
+}
+
+IConnectableLayer* INetwork::AddAdditionLayer(const char* name)
+{
+    return pNetworkImpl->AddAdditionLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMultiplicationLayer(const char* name)
+{
+    return pNetworkImpl->AddMultiplicationLayer(name);
+}
+
+IConnectableLayer* INetwork::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
+                                                        const ConstTensor& mean,
+                                                        const ConstTensor& variance,
+                                                        const ConstTensor& beta,
+                                                        const ConstTensor& gamma,
+                                                        const char* name)
+{
+    return pNetworkImpl->AddBatchNormalizationLayer(desc, mean, variance, beta, gamma, name);
+}
+
+IConnectableLayer* INetwork::AddRankLayer(const char* name)
+{
+    return pNetworkImpl->AddRankLayer(name);
+}
+
+IConnectableLayer* INetwork::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
+                                                    const char* name)
+{
+    ResizeDescriptor resizeDescriptor;
+    resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
+    resizeDescriptor.m_DataLayout       = descriptor.m_DataLayout;
+    resizeDescriptor.m_TargetWidth      = descriptor.m_TargetWidth;
+    resizeDescriptor.m_TargetHeight     = descriptor.m_TargetHeight;
+    resizeDescriptor.m_AlignCorners     = descriptor.m_AlignCorners;
+    resizeDescriptor.m_HalfPixelCenters = descriptor.m_HalfPixelCenters;
+
+    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
+                                            const char* name)
+{
+    return pNetworkImpl->AddResizeLayer(resizeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+                                            const char* name)
+{
+    return pNetworkImpl->AddReduceLayer(reduceDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
+                                                           const char* name)
+{
+    return pNetworkImpl->AddInstanceNormalizationLayer(desc, name);
+}
+
+IConnectableLayer* INetwork::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+                                                     const char* name)
+{
+    return pNetworkImpl->AddL2NormalizationLayer(desc, name);
+}
+
+IConnectableLayer* INetwork::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
+                                                const char* name)
+{
+    return pNetworkImpl->AddLogSoftmaxLayer(logSoftmaxDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddConstantLayer(const ConstTensor& input,
+                                              const char* name)
+{
+    return pNetworkImpl->AddConstantLayer(input, name);
+}
+
+IConnectableLayer* INetwork::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
+                                            const char* name)
+{
+    return pNetworkImpl->AddReshapeLayer(reshapeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+                                                   const char* name)
+{
+    return pNetworkImpl->AddSpaceToBatchNdLayer(spaceToBatchNdDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+                                                  const char* name)
+{
+    return pNetworkImpl->AddSpaceToDepthLayer(spaceToDepthDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddFloorLayer(const char* name)
+{
+    return pNetworkImpl->AddFloorLayer(name);
+}
+IConnectableLayer* INetwork::AddOutputLayer(LayerBindingId id, const char* name)
+{
+    return pNetworkImpl->AddOutputLayer(id, name);
+}
+
+IConnectableLayer* INetwork::AddLstmLayer(const LstmDescriptor& descriptor,
+                                          const LstmInputParams& params,
+                                          const char* name)
+{
+    return pNetworkImpl->AddLstmLayer(descriptor, params, name);
+}
+
+IConnectableLayer* INetwork::AddDivisionLayer(const char* name)
+{
+    return pNetworkImpl->AddDivisionLayer(name);
+}
+
+IConnectableLayer* INetwork::AddSubtractionLayer(const char* name)
+{
+    return pNetworkImpl->AddSubtractionLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMaximumLayer(const char* name)
+{
+    return pNetworkImpl->AddMaximumLayer(name);
+}
+
+IConnectableLayer* INetwork::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+{
+    return pNetworkImpl->AddMeanLayer(meanDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddPadLayer(const PadDescriptor& padDescriptor,
+                                         const char* name)
+{
+    return pNetworkImpl->AddPadLayer(padDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddQuantizeLayer(const char* name)
+{
+    return pNetworkImpl->AddQuantizeLayer(name);
+}
+
+IConnectableLayer* INetwork::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+                                                  const char* name)
+{
+    return pNetworkImpl->AddStridedSliceLayer(stridedSliceDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddMinimumLayer(const char* name)
+{
+    return pNetworkImpl->AddMinimumLayer(name);
+}
+
+IConnectableLayer* INetwork::AddGreaterLayer(const char* name)
+{
+    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
+}
+
+IConnectableLayer* INetwork::AddEqualLayer(const char* name)
+{
+    return pNetworkImpl->AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
+}
+
+IConnectableLayer* INetwork::AddRsqrtLayer(const char* name)
+{
+    return pNetworkImpl->AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
+}
+
+IConnectableLayer* INetwork::AddGatherLayer(const char* name)
+{
+    GatherDescriptor gatherDescriptor{};
+    return pNetworkImpl->AddGatherLayer(gatherDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddGatherLayer(const GatherDescriptor& descriptor,
+                                            const char* name)
+{
+    return pNetworkImpl->AddGatherLayer(descriptor, name);
+}
+
+IConnectableLayer* INetwork::AddSwitchLayer(const char* name)
+{
+    return pNetworkImpl->AddSwitchLayer(name);
+}
+
+IConnectableLayer* INetwork::AddPreluLayer(const char* name)
+{
+    return pNetworkImpl->AddPreluLayer(name);
+}
+
+IConnectableLayer* INetwork::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+                                                            const ConstTensor& weights,
+                                                            const Optional<ConstTensor>& biases,
+                                                            const char* name)
+{
+    return pNetworkImpl->AddTransposeConvolution2dLayer(descriptor, weights, biases, name);
+}
+
+IConnectableLayer* INetwork::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+                                               const char* name)
+{
+    return pNetworkImpl->AddTransposeLayer(transposeDescriptor, name);
+}
+
+IConnectableLayer* INetwork::AddStackLayer(const StackDescriptor& descriptor,
+                                           const char* name)
+{
+    return pNetworkImpl->AddStackLayer(descriptor, name);
+}
+
+IConnectableLayer* INetwork::AddStandInLayer(const StandInDescriptor& descriptor,
+                                             const char* name)
+{
+    return pNetworkImpl->AddStandInLayer(descriptor, name);
+}
+
+IConnectableLayer* INetwork::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+                                                   const char* name)
+{
+    return pNetworkImpl->AddQuantizedLstmLayer(params, name);
+}
+
+IConnectableLayer* INetwork::AddQLstmLayer(const QLstmDescriptor& descriptor,
+                                           const LstmInputParams& params,
+                                           const char* name)
+{
+    return pNetworkImpl->AddQLstmLayer(descriptor, params, name);
+}
+
+IConnectableLayer* INetwork::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& descriptor,
+                                                   const char* name)
+{
+    return pNetworkImpl->AddLogicalBinaryLayer(descriptor, name);
+}
+
+void INetwork::Accept(ILayerVisitor& visitor) const
+{
+    return pNetworkImpl->Accept(visitor);
+}
+
+void INetwork::ExecuteStrategy(IStrategy& strategy) const
+{
+    return pNetworkImpl->ExecuteStrategy(strategy);
+}
+
 armnn::INetwork* INetwork::CreateRaw(NetworkOptions networkOptions)
 {
-    return new Network(networkOptions);
+    return new INetwork(networkOptions);
 }
 
 armnn::INetworkPtr INetwork::Create(NetworkOptions networkOptions)
@@ -50,21 +514,48 @@
 
 void INetwork::Destroy(INetwork* network)
 {
-    delete PolymorphicDowncast<Network*>(network);
+    delete network;
 }
 
+
+IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph)
+    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph))) {}
+
+IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<OptimizedNetworkImpl> impl)
+    : pOptimizedNetworkImpl(std::move(impl)) {}
+
+IOptimizedNetwork::IOptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
+    : pOptimizedNetworkImpl(new OptimizedNetworkImpl(std::move(graph), modelOptions)) {}
+
+IOptimizedNetwork::~IOptimizedNetwork() = default;
+
 void IOptimizedNetwork::Destroy(IOptimizedNetwork* network)
 {
-    delete PolymorphicDowncast<OptimizedNetwork*>(network);
+    delete network;
 }
 
-Status OptimizedNetwork::PrintGraph()
+Status IOptimizedNetwork::PrintGraph()
+{
+    return pOptimizedNetworkImpl->PrintGraph();
+}
+
+Status IOptimizedNetwork::SerializeToDot(std::ostream& stream) const
+{
+    return pOptimizedNetworkImpl->SerializeToDot(stream);
+}
+
+profiling::ProfilingGuid IOptimizedNetwork::GetGuid() const
+{
+    return pOptimizedNetworkImpl->GetGuid();
+}
+
+Status OptimizedNetworkImpl::PrintGraph()
 {
     m_Graph->Print();
     return Status::Success;
 }
 
-Status OptimizedNetwork::SerializeToDot(std::ostream& stream) const
+Status OptimizedNetworkImpl::SerializeToDot(std::ostream& stream) const
 {
     return m_Graph->SerializeToDot(stream);
 }
@@ -375,7 +866,7 @@
 }
 
 
-OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                   BackendSettings& backendSettings,
                                   Graph::Iterator& firstLayer,
                                   Graph::Iterator& lastLayer,
@@ -501,7 +992,7 @@
     return result;
 }
 
-OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                   BackendSettings& backendSettings,
                                   SubgraphView& subgraph,
                                   Optional<std::vector<std::string>&> errMessages)
@@ -534,7 +1025,7 @@
     return backends;
 }
 
-OptimizationResult ApplyBackendOptimizations(OptimizedNetwork* optNetObjPtr,
+OptimizationResult ApplyBackendOptimizations(OptimizedNetworkImpl* optNetObjPtr,
                                              BackendSettings& backendSettings,
                                              BackendsMap& backends,
                                              const ModelOptions& modelOptions,
@@ -1024,16 +1515,15 @@
         throw InvalidArgumentException("BFloat16 and Float16 optimization cannot be enabled at the same time.");
     }
 
-    const Network& network = *PolymorphicDowncast<const Network*>(&inNetwork);
-    std::unique_ptr<Graph> graph = std::make_unique<Graph>(network.GetGraph());
+    std::unique_ptr<Graph> graph = std::make_unique<Graph>(inNetwork.pNetworkImpl->GetGraph());
 
-    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph), options.m_ModelOptions),
+    auto optNet = IOptimizedNetworkPtr(new IOptimizedNetwork(std::move(graph), options.m_ModelOptions),
                                        &IOptimizedNetwork::Destroy);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+    IOptimizedNetwork* optNetObjPtr = optNet.get();
 
     // Get the optimized graph
-    Graph& optGraph = optNetObjPtr->GetGraph();
+    Graph& optGraph = optNetObjPtr->pOptimizedNetworkImpl->GetGraph();
 
     // Perform AddBroadcastReshapeLayer optimisation
     using namespace optimizations;
@@ -1094,7 +1584,7 @@
     // Assign an available backend to each layer
     Graph::Iterator firstLayer = optGraph.begin();
     Graph::Iterator lastLayer  = optGraph.end();
-    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr,
+    OptimizationResult assignBackendsResult = AssignBackends(optNetObjPtr->pOptimizedNetworkImpl.get(),
                                                              backendSettings,
                                                              firstLayer,
                                                              lastLayer,
@@ -1109,7 +1599,7 @@
                                                 OptimizeInverseConversionsFp32()));
 
     // Apply the backend-specific optimizations
-    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr,
+    OptimizationResult backendOptimizationResult = ApplyBackendOptimizations(optNetObjPtr->pOptimizedNetworkImpl.get(),
                                                                              backendSettings,
                                                                              backends,
                                                                              options.m_ModelOptions,
@@ -1159,13 +1649,13 @@
 
         if (!backendSpecificOptimizations.empty())
         {
-            Optimizer::Pass(optNetObjPtr->GetGraph(), backendSpecificOptimizations);
+            Optimizer::Pass(optNetObjPtr->pOptimizedNetworkImpl->GetGraph(), backendSpecificOptimizations);
         }
     }
 
     return optNet;
 }
-bool Network::GetShapeInferenceMethod()
+bool NetworkImpl::GetShapeInferenceMethod()
 {
     if (m_NetworkOptions.size() > 0 && m_NetworkOptions[0].GetBackendId().Get() == "ShapeInferenceMethod")
     {
@@ -1174,51 +1664,51 @@
 
     return false;
 }
-Network::Network(NetworkOptions networkOptions)
+NetworkImpl::NetworkImpl(NetworkOptions networkOptions)
 : m_NetworkOptions(networkOptions),
   m_Graph(std::make_unique<Graph>(GetShapeInferenceMethod()))
 {}
 
-Network::~Network()
+NetworkImpl::~NetworkImpl()
 {
 }
 
-Status Network::PrintGraph()
+Status NetworkImpl::PrintGraph()
 {
     m_Graph->Print();
     return Status::Success;
 }
 
-IConnectableLayer* Network::AddInputLayer(LayerBindingId id, const char* name)
+IConnectableLayer* NetworkImpl::AddInputLayer(LayerBindingId id, const char* name)
 {
     return m_Graph->AddLayer<InputLayer>(id, name);
 }
 
-IConnectableLayer* Network::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
+IConnectableLayer* NetworkImpl::AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                             const char* name)
 {
     return m_Graph->AddLayer<BatchToSpaceNdLayer>(batchToSpaceNdDescriptor, name);
 }
 
-IConnectableLayer* Network::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
+IConnectableLayer* NetworkImpl::AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
                                                const char* name)
 {
     return m_Graph->AddLayer<ComparisonLayer>(comparisonDescriptor, name);
 }
 
-IConnectableLayer* Network::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
+IConnectableLayer* NetworkImpl::AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
                                                      const char* name)
 {
     return m_Graph->AddLayer<ElementwiseUnaryLayer>(elementwiseUnaryDescriptor, name);
 }
 
-IConnectableLayer* Network::AddFillLayer(const FillDescriptor& fillDescriptor,
+IConnectableLayer* NetworkImpl::AddFillLayer(const FillDescriptor& fillDescriptor,
                                          const char* name)
 {
     return m_Graph->AddLayer<FillLayer>(fillDescriptor, name);
 }
 
-IConnectableLayer* Network::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                        const ConstTensor& weights,
                                                        const Optional<ConstTensor>& biases,
                                                        const char* name)
@@ -1240,7 +1730,7 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const Optional<ConstTensor>& biases,
                                                    const char* name)
@@ -1248,7 +1738,7 @@
     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const char* name)
 {
@@ -1256,7 +1746,7 @@
     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* Network::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
+IConnectableLayer* NetworkImpl::AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                                    const ConstTensor& weights,
                                                    const ConstTensor& biases,
                                                    const char* name)
@@ -1265,16 +1755,16 @@
     return AddFullyConnectedLayerImpl(fullyConnectedDescriptor, weights, optionalBiases, name);
 }
 
-IConnectableLayer* Network::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
+IConnectableLayer* NetworkImpl::AddConcatLayer(const ConcatDescriptor& concatDescriptor,
                                            const char* name)
 {
     return m_Graph->AddLayer<ConcatLayer>(concatDescriptor, name);
 }
 
-IConnectableLayer* Network::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
-                                                      const ConstTensor& weights,
-                                                      const Optional<ConstTensor>& biases,
-                                                      const char* name)
+IConnectableLayer* NetworkImpl::AddConvolution2dLayerImpl(const Convolution2dDescriptor& convolution2dDescriptor,
+                                                          const ConstTensor& weights,
+                                                          const Optional<ConstTensor>& biases,
+                                                          const char* name)
 {
     if (convolution2dDescriptor.m_BiasEnabled && !biases.has_value())
     {
@@ -1293,7 +1783,7 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const Optional<ConstTensor>& biases,
                                                   const char* name)
@@ -1301,7 +1791,7 @@
     return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const char* name)
 {
@@ -1309,7 +1799,7 @@
     return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* Network::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
+IConnectableLayer* NetworkImpl::AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                                   const ConstTensor& weights,
                                                   const ConstTensor& biases,
                                                   const char* name)
@@ -1318,7 +1808,7 @@
     return AddConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
 }
 
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayerImpl(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayerImpl(
     const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
     const ConstTensor& weights,
     const Optional<ConstTensor>& biases,
@@ -1341,13 +1831,13 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
+IConnectableLayer* NetworkImpl::AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
                                                  const char* name)
 {
     return m_Graph->AddLayer<DepthToSpaceLayer>(depthToSpaceDescriptor, name);
 }
 
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
         const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
         const ConstTensor& weights,
         const Optional<ConstTensor>& biases,
@@ -1356,7 +1846,7 @@
     return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
     const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
     const ConstTensor& weights,
     const char* name)
@@ -1365,7 +1855,7 @@
     return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, biases, name);
 }
 
-IConnectableLayer* Network::AddDepthwiseConvolution2dLayer(
+IConnectableLayer* NetworkImpl::AddDepthwiseConvolution2dLayer(
     const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
     const ConstTensor& weights,
     const ConstTensor& biases,
@@ -1375,7 +1865,7 @@
     return AddDepthwiseConvolution2dLayerImpl(convolution2dDescriptor, weights, optionalBiases, name);
 }
 
-IConnectableLayer* Network::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
+IConnectableLayer* NetworkImpl::AddDetectionPostProcessLayer(const armnn::DetectionPostProcessDescriptor& descriptor,
                                                          const ConstTensor& anchors, const char* name)
 {
     const auto layer = m_Graph->AddLayer<DetectionPostProcessLayer>(descriptor, name);
@@ -1385,91 +1875,91 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
+IConnectableLayer* NetworkImpl::AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
                                             const char* name)
 {
     return m_Graph->AddLayer<PermuteLayer>(permuteDescriptor, name);
 }
 
-IConnectableLayer* Network::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
+IConnectableLayer* NetworkImpl::AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
     const char* name)
 {
     return m_Graph->AddLayer<Pooling2dLayer>(pooling2dDescriptor, name);
 }
 
-IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
+IConnectableLayer* NetworkImpl::AddActivationLayer(const ActivationDescriptor& activationDescriptor,
     const char* name)
 {
     return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
 }
 
-IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
+IConnectableLayer* NetworkImpl::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
                                               const char* name)
 {
     return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
 }
 
-IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
+IConnectableLayer* NetworkImpl::AddNormalizationLayer(const NormalizationDescriptor&
 normalizationDescriptor,
     const char* name)
 {
     return m_Graph->AddLayer<NormalizationLayer>(normalizationDescriptor, name);
 }
 
-IConnectableLayer* Network::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name)
 {
     return m_Graph->AddLayer<SliceLayer>(sliceDescriptor, name);
 }
 
-IConnectableLayer* Network::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
+IConnectableLayer* NetworkImpl::AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
     const char* name)
 {
     return m_Graph->AddLayer<SoftmaxLayer>(softmaxDescriptor, name);
 }
 
-IConnectableLayer* Network::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
+IConnectableLayer* NetworkImpl::AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
     const char* name)
 {
     return m_Graph->AddLayer<SplitterLayer>(splitterDescriptor, name);
 }
 
-IConnectableLayer* Network::AddMaximumLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMaximumLayer(const char* name)
 {
     return m_Graph->AddLayer<MaximumLayer>(name);
 }
 
-IConnectableLayer* Network::AddMinimumLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMinimumLayer(const char* name)
 {
     return m_Graph->AddLayer<MinimumLayer>(name);
 }
 
-IConnectableLayer* Network::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
+IConnectableLayer* NetworkImpl::AddMergerLayer(const MergerDescriptor& mergerDescriptor,
                                            const char* name)
 {
     return AddConcatLayer(mergerDescriptor, name);
 }
 
-IConnectableLayer* Network::AddAbsLayer(const char * name)
+IConnectableLayer* NetworkImpl::AddAbsLayer(const char * name)
 {
     return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Abs), name);
 }
 
-IConnectableLayer* Network::AddAdditionLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddAdditionLayer(const char* name)
 {
     return m_Graph->AddLayer<AdditionLayer>(name);
 }
 
-IConnectableLayer* Network::AddMultiplicationLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMultiplicationLayer(const char* name)
 {
     return m_Graph->AddLayer<MultiplicationLayer>(name);
 }
 
-IConnectableLayer* Network::AddOutputLayer(LayerBindingId id, const char* name)
+IConnectableLayer* NetworkImpl::AddOutputLayer(LayerBindingId id, const char* name)
 {
     return m_Graph->AddLayer<OutputLayer>(id, name);
 }
 
-IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
+IConnectableLayer* NetworkImpl::AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                        const ConstTensor&                  mean,
                                                        const ConstTensor&                  variance,
                                                        const ConstTensor&                  beta,
@@ -1486,19 +1976,19 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddRankLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddRankLayer(const char* name)
 {
     return m_Graph->AddLayer<RankLayer>(name);
 }
 
-IConnectableLayer* Network::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
-                                           const char* name)
+IConnectableLayer* NetworkImpl::AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
+                                               const char* name)
 {
     return m_Graph->AddLayer<ReduceLayer>(reduceDescriptor, name);
 }
 
-IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
-                                                   const char* name)
+IConnectableLayer* NetworkImpl::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
+                                                       const char* name)
 {
     ResizeDescriptor resizeDescriptor;
     resizeDescriptor.m_Method           = ResizeMethod::Bilinear;
@@ -1511,31 +2001,30 @@
     return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
 }
 
-IConnectableLayer* Network::AddResizeLayer(const ResizeDescriptor&
-resizeDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddResizeLayer(const ResizeDescriptor& resizeDescriptor, const char* name)
 {
     return m_Graph->AddLayer<ResizeLayer>(resizeDescriptor, name);
 }
 
-IConnectableLayer* Network::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
-                                                          const char* name)
+IConnectableLayer* NetworkImpl::AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
+                                                              const char* name)
 {
     return m_Graph->AddLayer<InstanceNormalizationLayer>(desc, name);
 }
 
-IConnectableLayer* Network::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
-                                                    const char* name)
+IConnectableLayer* NetworkImpl::AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
+                                                        const char* name)
 {
     return m_Graph->AddLayer<L2NormalizationLayer>(desc, name);
 }
 
-IConnectableLayer* Network::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
+IConnectableLayer* NetworkImpl::AddLogSoftmaxLayer(const LogSoftmaxDescriptor& desc,
                                                const char* name)
 {
     return m_Graph->AddLayer<LogSoftmaxLayer>(desc, name);
 }
 
-IConnectableLayer* Network::AddConstantLayer(const ConstTensor& input, const char* name)
+IConnectableLayer* NetworkImpl::AddConstantLayer(const ConstTensor& input, const char* name)
 {
     auto layer = m_Graph->AddLayer<ConstantLayer>(name);
 
@@ -1544,30 +2033,30 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
+IConnectableLayer* NetworkImpl::AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
                                             const char* name)
 {
     return m_Graph->AddLayer<ReshapeLayer>(reshapeDescriptor, name);
 }
 
-IConnectableLayer* Network::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
+IConnectableLayer* NetworkImpl::AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
                                                    const char* name)
 {
     return m_Graph->AddLayer<SpaceToBatchNdLayer>(spaceToBatchNdDescriptor, name);
 }
 
-IConnectableLayer* Network::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
+IConnectableLayer* NetworkImpl::AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
                                                  const char* name)
 {
     return m_Graph->AddLayer<SpaceToDepthLayer>(spaceToDepthDescriptor, name);
 }
 
-IConnectableLayer* Network::AddFloorLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddFloorLayer(const char* name)
 {
     return m_Graph->AddLayer<FloorLayer>(name);
 }
 
-IConnectableLayer* Network::AddLstmLayer(const LstmDescriptor&  descriptor,
+IConnectableLayer* NetworkImpl::AddLstmLayer(const LstmDescriptor&  descriptor,
                                          const LstmInputParams& params,
                                          const char* name)
 {
@@ -1708,85 +2197,85 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddDivisionLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddDivisionLayer(const char* name)
 {
     return m_Graph->AddLayer<DivisionLayer>(name);
 }
 
-IConnectableLayer* Network::AddSubtractionLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddSubtractionLayer(const char* name)
 {
     return m_Graph->AddLayer<SubtractionLayer>(name);
 }
 
-IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
 {
     return m_Graph->AddLayer<MeanLayer>(meanDescriptor,name);
 }
 
-IConnectableLayer* Network::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
+IConnectableLayer* NetworkImpl::AddPadLayer(const PadDescriptor& padDescriptor, const char* name)
 {
     return m_Graph->AddLayer<PadLayer>(padDescriptor,name);
 }
 
-IConnectableLayer *Network::AddQuantizeLayer(const char *name)
+IConnectableLayer *NetworkImpl::AddQuantizeLayer(const char *name)
 {
     return m_Graph->AddLayer<QuantizeLayer>(name);
 }
 
-IConnectableLayer* Network::AddDequantizeLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddDequantizeLayer(const char* name)
 {
     return m_Graph->AddLayer<DequantizeLayer>(name);
 }
 
-IConnectableLayer* Network::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
+IConnectableLayer* NetworkImpl::AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
                                                  const char* name)
 {
     return m_Graph->AddLayer<StridedSliceLayer>(stridedSliceDescriptor, name);
 }
 
-IConnectableLayer* Network::AddGreaterLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddGreaterLayer(const char* name)
 {
     return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Greater), name);
 }
 
-IConnectableLayer* Network::AddEqualLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddEqualLayer(const char* name)
 {
     return AddComparisonLayer(ComparisonDescriptor(ComparisonOperation::Equal), name);
 }
 
-IConnectableLayer* Network::AddRsqrtLayer(const char * name)
+IConnectableLayer* NetworkImpl::AddRsqrtLayer(const char * name)
 {
     return AddElementwiseUnaryLayer(ElementwiseUnaryDescriptor(UnaryOperation::Rsqrt), name);
 }
 
-IConnectableLayer* Network::AddGatherLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddGatherLayer(const char* name)
 {
     GatherDescriptor gatherDescriptor{};
     return AddGatherLayer(gatherDescriptor, name);
 }
 
-IConnectableLayer* Network::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
+IConnectableLayer* NetworkImpl::AddGatherLayer(const GatherDescriptor& gatherDescriptor,
                                            const char* name)
 {
     return m_Graph->AddLayer<GatherLayer>(gatherDescriptor, name);
 }
 
-IConnectableLayer* Network::AddMergeLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddMergeLayer(const char* name)
 {
     return m_Graph->AddLayer<MergeLayer>(name);
 }
 
-IConnectableLayer* Network::AddSwitchLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddSwitchLayer(const char* name)
 {
     return m_Graph->AddLayer<SwitchLayer>(name);
 }
 
-IConnectableLayer* Network::AddPreluLayer(const char* name)
+IConnectableLayer* NetworkImpl::AddPreluLayer(const char* name)
 {
     return m_Graph->AddLayer<PreluLayer>(name);
 }
 
-IConnectableLayer* Network::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
+IConnectableLayer* NetworkImpl::AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                            const ConstTensor& weights,
                                                            const Optional<ConstTensor>& biases,
                                                            const char* name)
@@ -1808,26 +2297,26 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
+IConnectableLayer* NetworkImpl::AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
                                               const char* name)
 {
     return m_Graph->AddLayer<TransposeLayer>(transposeDescriptor, name);
 }
 
-IConnectableLayer* Network::AddStackLayer(const StackDescriptor& stackDescriptor,
+IConnectableLayer* NetworkImpl::AddStackLayer(const StackDescriptor& stackDescriptor,
                                           const char* name)
 {
     return m_Graph->AddLayer<StackLayer>(stackDescriptor, name);
 }
 
 
-IConnectableLayer* Network::AddStandInLayer(const StandInDescriptor& desc,
+IConnectableLayer* NetworkImpl::AddStandInLayer(const StandInDescriptor& desc,
                                             const char* name)
 {
     return m_Graph->AddLayer<StandInLayer>(desc, name);
 }
 
-IConnectableLayer* Network::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
+IConnectableLayer* NetworkImpl::AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
                                                   const char* name)
 {
     const auto layer = m_Graph->AddLayer<QuantizedLstmLayer>(name);
@@ -1865,7 +2354,7 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddQLstmLayer(const QLstmDescriptor&  descriptor,
+IConnectableLayer* NetworkImpl::AddQLstmLayer(const QLstmDescriptor&  descriptor,
                                           const LstmInputParams& params,
                                           const char* name)
 {
@@ -2007,13 +2496,13 @@
     return layer;
 }
 
-IConnectableLayer* Network::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
+IConnectableLayer* NetworkImpl::AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
                                                   const char* name)
 {
     return m_Graph->AddLayer<LogicalBinaryLayer>(logicalBinaryDescriptor, name);
 }
 
-void Network::Accept(ILayerVisitor& visitor) const
+void NetworkImpl::Accept(ILayerVisitor& visitor) const
 {
     for (auto layer : GetGraph())
     {
@@ -2021,7 +2510,7 @@
     };
 }
 
-void Network::ExecuteStrategy(IStrategy& strategy) const
+void NetworkImpl::ExecuteStrategy(IStrategy& strategy) const
 {
     for (auto layer : GetGraph())
     {
@@ -2029,17 +2518,17 @@
     };
 }
 
-OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
+OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid())
 {
 }
 
-OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
+OptimizedNetworkImpl::OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions)
     : m_Graph(std::move(graph)), m_Guid(profiling::ProfilingService::GetNextGuid()), m_ModelOptions(modelOptions)
 {
 }
 
-OptimizedNetwork::~OptimizedNetwork()
+OptimizedNetworkImpl::~OptimizedNetworkImpl()
 {
 }
 
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index cffade5..8f16be1 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -19,246 +19,249 @@
 
 #include "Graph.hpp"
 #include "Layer.hpp"
+#include "OptimizedNetworkImpl.hpp"
 
 namespace armnn
 {
 class Graph;
 
+using NetworkImplPtr = std::unique_ptr<NetworkImpl, void(*)(NetworkImpl* network)>;
+
 /// Private implementation of INetwork.
-class Network final : public INetwork
+class NetworkImpl
 {
 public:
-    Network(NetworkOptions networkOptions = {});
-    ~Network();
+    NetworkImpl(NetworkOptions networkOptions = {});
+    ~NetworkImpl();
 
     const Graph& GetGraph() const { return *m_Graph; }
 
-    Status PrintGraph() override;
+    Status PrintGraph();
 
-    IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
+    IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr);
 
     IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
-                                         const char* name = nullptr) override;
+                                         const char* name = nullptr);
 
     IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
-                                              const char* name = nullptr) override;
+                                              const char* name = nullptr);
 
     IConnectableLayer* AddComparisonLayer(const ComparisonDescriptor& comparisonDescriptor,
-                                          const char* name = nullptr) override;
+                                          const char* name = nullptr);
 
     IConnectableLayer* AddConcatLayer(const ConcatDescriptor& concatDescriptor,
-                                      const char* name = nullptr) override;
+                                      const char* name = nullptr);
 
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const Optional<ConstTensor>& biases,
-                                             const char* name = nullptr) override;
+                                             const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
-                                             const char* name = nullptr) override;
+                                             const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddConvolution2dLayer overload is deprecated")
     IConnectableLayer* AddConvolution2dLayer(const Convolution2dDescriptor& convolution2dDescriptor,
                                              const ConstTensor& weights,
                                              const ConstTensor& biases,
-                                             const char* name = nullptr) override;
+                                             const char* name = nullptr);
 
     IConnectableLayer* AddDepthToSpaceLayer(const DepthToSpaceDescriptor& depthToSpaceDescriptor,
-                                            const char* name = nullptr) override;
+                                            const char* name = nullptr);
 
     IConnectableLayer* AddDepthwiseConvolution2dLayer(
         const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
         const ConstTensor& weights,
         const Optional<ConstTensor>& biases,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
     IConnectableLayer* AddDepthwiseConvolution2dLayer(
         const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
         const ConstTensor& weights,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddDepthwiseConvolution2dLayer overload is deprecated")
     IConnectableLayer* AddDepthwiseConvolution2dLayer(
         const DepthwiseConvolution2dDescriptor& convolution2dDescriptor,
         const ConstTensor& weights,
         const ConstTensor& biases,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
-    IConnectableLayer* AddDequantizeLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddDequantizeLayer(const char* name = nullptr);
 
     IConnectableLayer* AddDetectionPostProcessLayer(
         const DetectionPostProcessDescriptor& descriptor,
         const ConstTensor& anchors,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     IConnectableLayer* AddElementwiseUnaryLayer(const ElementwiseUnaryDescriptor& elementwiseUnaryDescriptor,
-                                                const char* name = nullptr) override;
+                                                const char* name = nullptr);
 
     IConnectableLayer* AddFillLayer(const FillDescriptor& fillDescriptor,
-                                    const char* name = nullptr) override;
+                                    const char* name = nullptr);
 
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const ConstTensor& weights,
                                               const Optional<ConstTensor>& biases,
-                                              const char* name = nullptr) override;
+                                              const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const ConstTensor& weights,
-                                              const char* name = nullptr) override;
+                                              const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddFullyConnectedLayer overload is deprecated")
     IConnectableLayer* AddFullyConnectedLayer(const FullyConnectedDescriptor& fullyConnectedDescriptor,
                                               const ConstTensor& weights,
                                               const ConstTensor& biases,
-                                              const char* name = nullptr) override;
+                                              const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("This AddGatherLayer overload is deprecated")
-    IConnectableLayer* AddGatherLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddGatherLayer(const char* name = nullptr);
 
     IConnectableLayer* AddGatherLayer(const GatherDescriptor& gatherDescriptor,
-                                      const char* name = nullptr) override;
+                                      const char* name = nullptr);
 
     IConnectableLayer* AddPermuteLayer(const PermuteDescriptor& permuteDescriptor,
-                                       const char* name = nullptr) override;
+                                       const char* name = nullptr);
 
     IConnectableLayer* AddPooling2dLayer(const Pooling2dDescriptor& pooling2dDescriptor,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     IConnectableLayer* AddActivationLayer(const ActivationDescriptor& activationDescriptor,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     IConnectableLayer* AddNormalizationLayer(const NormalizationDescriptor& normalizationDescriptor,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
-    IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr) override;
+    IConnectableLayer* AddSliceLayer(const SliceDescriptor& sliceDescriptor, const char* name = nullptr);
 
     IConnectableLayer* AddSoftmaxLayer(const SoftmaxDescriptor& softmaxDescriptor,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     IConnectableLayer* AddSplitterLayer(const ViewsDescriptor& splitterDescriptor,
-        const char* name = nullptr) override;
+        const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("Use AddConcatLayer instead")
     IConnectableLayer* AddMergerLayer(const MergerDescriptor& mergerDescriptor,
-                                      const char* name = nullptr) override;
+                                      const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    IConnectableLayer* AddAbsLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddAbsLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddAdditionLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddAdditionLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddMultiplicationLayer(const char* name = nullptr);
 
     IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                   const ConstTensor&                  mean,
                                                   const ConstTensor&                  variance,
                                                   const ConstTensor&                  beta,
                                                   const ConstTensor&                  gamma,
-                                                  const char*                         name = nullptr) override;
+                                                  const char*                         name = nullptr);
 
-    IConnectableLayer* AddRankLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddRankLayer(const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
     IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
-                                              const char* name = nullptr) override;
+                                              const char* name = nullptr);
 
     IConnectableLayer* AddResizeLayer(const ResizeDescriptor& resizeDescriptor,
-                                      const char* name = nullptr) override;
+                                      const char* name = nullptr);
 
     IConnectableLayer* AddReduceLayer(const ReduceDescriptor& reduceDescriptor,
-                                      const char* name = nullptr) override;
+                                      const char* name = nullptr);
 
     IConnectableLayer* AddInstanceNormalizationLayer(const InstanceNormalizationDescriptor& desc,
-                                                     const char* name = nullptr) override;
+                                                     const char* name = nullptr);
 
     IConnectableLayer* AddL2NormalizationLayer(const L2NormalizationDescriptor& desc,
-                                               const char* name = nullptr) override;
+                                               const char* name = nullptr);
 
     IConnectableLayer* AddLogSoftmaxLayer(const LogSoftmaxDescriptor& logSoftmaxDescriptor,
-                                          const char* name = nullptr) override;
+                                          const char* name = nullptr);
 
-    IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr) override;
+    IConnectableLayer* AddConstantLayer(const ConstTensor& input, const char* name = nullptr);
 
     IConnectableLayer* AddReshapeLayer(const ReshapeDescriptor& reshapeDescriptor,
-                                       const char* name = nullptr) override;
+                                       const char* name = nullptr);
 
     IConnectableLayer* AddSpaceToBatchNdLayer(const SpaceToBatchNdDescriptor& spaceToBatchNdDescriptor,
-                                              const char* name = nullptr) override;
+                                              const char* name = nullptr);
 
     IConnectableLayer* AddSpaceToDepthLayer(const SpaceToDepthDescriptor& spaceToDepthDescriptor,
-                                            const char* name = nullptr) override;
+                                            const char* name = nullptr);
 
-    IConnectableLayer* AddFloorLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddFloorLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr) override;
+    IConnectableLayer* AddOutputLayer(LayerBindingId id, const char* name = nullptr);
 
     IConnectableLayer* AddLstmLayer(const LstmDescriptor& descriptor,
                                     const LstmInputParams& params,
-                                    const char* name = nullptr) override;
+                                    const char* name = nullptr);
 
-    IConnectableLayer* AddDivisionLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddDivisionLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddSubtractionLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddMaximumLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddMaximumLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override;
+    IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr);
 
-    IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor, const char* name = nullptr) override;
+    IConnectableLayer* AddPadLayer(const PadDescriptor& padDescriptor, const char* name = nullptr);
 
-    IConnectableLayer* AddQuantizeLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddQuantizeLayer(const char* name = nullptr);
 
     IConnectableLayer* AddStridedSliceLayer(const StridedSliceDescriptor& stridedSliceDescriptor,
-                                            const char* name = nullptr) override;
+                                            const char* name = nullptr);
 
-    IConnectableLayer* AddMinimumLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddMinimumLayer(const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    IConnectableLayer* AddGreaterLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddGreaterLayer(const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("Use AddComparisonLayer instead")
-    IConnectableLayer* AddEqualLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddEqualLayer(const char* name = nullptr);
 
     ARMNN_DEPRECATED_MSG("Use AddElementwiseUnaryLayer instead")
-    IConnectableLayer* AddRsqrtLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddRsqrtLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddMergeLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddMergeLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddSwitchLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddSwitchLayer(const char* name = nullptr);
 
-    IConnectableLayer* AddPreluLayer(const char* name = nullptr) override;
+    IConnectableLayer* AddPreluLayer(const char* name = nullptr);
 
     IConnectableLayer* AddTransposeConvolution2dLayer(const TransposeConvolution2dDescriptor& descriptor,
                                                       const ConstTensor& weights,
                                                       const Optional<ConstTensor>& biases,
-                                                      const char* name = nullptr) override;
+                                                      const char* name = nullptr);
 
     IConnectableLayer* AddTransposeLayer(const TransposeDescriptor& transposeDescriptor,
-                                         const char* name = nullptr) override;
+                                         const char* name = nullptr);
 
     IConnectableLayer* AddStackLayer(const StackDescriptor& stackDescriptor,
-                                     const char* name = nullptr) override;
+                                     const char* name = nullptr);
 
     IConnectableLayer* AddStandInLayer(const StandInDescriptor& descriptor,
-                                       const char* name = nullptr) override;
+                                       const char* name = nullptr);
 
     IConnectableLayer* AddQLstmLayer(const QLstmDescriptor& descriptor,
                                      const LstmInputParams& params,
-                                     const char* name = nullptr) override;
+                                     const char* name = nullptr);
 
     IConnectableLayer* AddQuantizedLstmLayer(const QuantizedLstmInputParams& params,
-                                             const char* name = nullptr) override;
+                                             const char* name = nullptr);
 
     IConnectableLayer* AddLogicalBinaryLayer(const LogicalBinaryDescriptor& logicalBinaryDescriptor,
-                                             const char* name = nullptr) override;
+                                             const char* name = nullptr);
 
-    void Accept(ILayerVisitor& visitor) const override;
+    void Accept(ILayerVisitor& visitor) const;
 
-    void ExecuteStrategy(IStrategy& strategy) const override;
+    void ExecuteStrategy(IStrategy& strategy) const;
 
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
@@ -284,29 +287,6 @@
     ModelOptions m_ModelOptions;
 };
 
-class OptimizedNetwork final : public IOptimizedNetwork
-{
-public:
-    OptimizedNetwork(std::unique_ptr<Graph> graph);
-    OptimizedNetwork(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
-    ~OptimizedNetwork();
-
-    Status PrintGraph() override;
-    Status SerializeToDot(std::ostream& stream) const override;
-
-    profiling::ProfilingGuid GetGuid() const final { return m_Guid; };
-
-    Graph& GetGraph() { return *m_Graph; }
-    ModelOptions& GetModelOptions() { return m_ModelOptions; }
-
-private:
-    std::unique_ptr<Graph> m_Graph;
-    profiling::ProfilingGuid m_Guid;
-    ModelOptions m_ModelOptions;
-};
-
-
-
 struct OptimizationResult
 {
     bool m_Warning;
@@ -338,7 +318,7 @@
                                               bool importEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
-OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
+OptimizationResult AssignBackends(OptimizedNetworkImpl* optNetObjPtr,
                                   BackendSettings& backendSettings,
                                   Graph::Iterator& firstLayer,
                                   Graph::Iterator& lastLayer,
diff --git a/src/armnn/NetworkQuantizer.cpp b/src/armnn/NetworkQuantizer.cpp
index eed3f41..06d8c5d 100644
--- a/src/armnn/NetworkQuantizer.cpp
+++ b/src/armnn/NetworkQuantizer.cpp
@@ -50,7 +50,7 @@
 
 void NetworkQuantizer::OverrideInputRange(LayerBindingId layerId, float min, float max)
 {
-    const Graph& graph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph();
+    const Graph& graph = m_InputNetwork->pNetworkImpl->GetGraph();
     auto inputLayers = graph.GetInputLayers();
 
     // Walk the input layers of the graph and override the quantization parameters of the one with the given id
@@ -69,7 +69,7 @@
     {
         m_RefineCount = 0;
         m_Ranges.SetDynamicMode(true);
-        const Graph& cGraph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort();
+        const Graph& cGraph = m_InputNetwork->pNetworkImpl->GetGraph().TopologicalSort();
 
         // need to insert Debug layers in the DynamicQuantizationStrategy
         Graph& graph = const_cast<Graph&>(cGraph);
@@ -136,7 +136,7 @@
 
 INetworkPtr NetworkQuantizer::ExportNetwork()
 {
-    const Graph& graph = PolymorphicDowncast<const Network*>(m_InputNetwork)->GetGraph().TopologicalSort();
+    const Graph& graph = m_InputNetwork->pNetworkImpl->GetGraph().TopologicalSort();
 
     // Step 1) Walk the graph and populate default min/max values for
     // intermediate tensors, only if Runtime does not exist (created
diff --git a/src/armnn/OptimizedNetworkImpl.hpp b/src/armnn/OptimizedNetworkImpl.hpp
new file mode 100644
index 0000000..25bf9ca
--- /dev/null
+++ b/src/armnn/OptimizedNetworkImpl.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+namespace armnn
+{
+
+class OptimizedNetworkImpl
+{
+public:
+    OptimizedNetworkImpl(std::unique_ptr<Graph> graph);
+    OptimizedNetworkImpl(std::unique_ptr<Graph> graph, const ModelOptions& modelOptions);
+    virtual ~OptimizedNetworkImpl();
+
+    virtual Status PrintGraph();
+    virtual Status SerializeToDot(std::ostream& stream) const;
+
+    virtual profiling::ProfilingGuid GetGuid() const { return m_Guid; };
+
+    Graph& GetGraph() { return *m_Graph; }
+    ModelOptions& GetModelOptions() { return m_ModelOptions; }
+
+private:
+    std::unique_ptr<Graph> m_Graph;
+    profiling::ProfilingGuid m_Guid;
+    ModelOptions m_ModelOptions;
+};
+
+} // namespace armnn
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index 8fdc4f1..9cc7b2c 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -135,7 +135,7 @@
     }
 
     unique_ptr<LoadedNetwork> loadedNetwork = LoadedNetwork::MakeLoadedNetwork(
-        std::unique_ptr<OptimizedNetwork>(PolymorphicDowncast<OptimizedNetwork*>(rawNetwork)),
+        std::unique_ptr<IOptimizedNetwork>(rawNetwork),
         errorMessage,
         networkProperties,
         m_ProfilingService);
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index ab83a89..f3485c7 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -282,7 +282,7 @@
 
     TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional());
     layer->Accept(visitor);
@@ -306,7 +306,7 @@
 
     TestConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, EmptyOptional(), layerName);
     layer->Accept(visitor);
@@ -335,7 +335,7 @@
 
     TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases);
     layer->Accept(visitor);
@@ -365,7 +365,7 @@
 
     TestConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
     layer->Accept(visitor);
@@ -388,7 +388,7 @@
 
     TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional());
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, EmptyOptional());
     layer->Accept(visitor);
@@ -412,7 +412,7 @@
 
     TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, EmptyOptional(), layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor,
                                                                         weights,
@@ -444,7 +444,7 @@
 
     TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases);
     layer->Accept(visitor);
@@ -474,7 +474,7 @@
 
     TestDepthwiseConvolution2dLayerVisitor visitor(descriptor, weights, optionalBiases, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddDepthwiseConvolution2dLayer(descriptor, weights, optionalBiases, layerName);
     layer->Accept(visitor);
@@ -491,7 +491,7 @@
 
     TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional());
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional());
     layer->Accept(visitor);
@@ -509,7 +509,7 @@
 
     TestFullyConnectedLayerVistor visitor(descriptor, weights, EmptyOptional(), layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, EmptyOptional(), layerName);
     layer->Accept(visitor);
@@ -532,7 +532,7 @@
 
     TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases);
     layer->Accept(visitor);
@@ -556,7 +556,7 @@
 
     TestFullyConnectedLayerVistor visitor(descriptor, weights, optionalBiases, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddFullyConnectedLayer(descriptor, weights, optionalBiases, layerName);
     layer->Accept(visitor);
@@ -586,7 +586,7 @@
 
     TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddBatchNormalizationLayer(descriptor, mean, variance, beta, gamma);
     layer->Accept(visitor);
@@ -617,7 +617,7 @@
 
     TestBatchNormalizationLayerVisitor visitor(descriptor, mean, variance, beta, gamma, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddBatchNormalizationLayer(
             descriptor, mean, variance, beta, gamma, layerName);
@@ -632,7 +632,7 @@
 
     TestConstantLayerVisitor visitor(input);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConstantLayer(input);
     layer->Accept(visitor);
@@ -647,7 +647,7 @@
 
     TestConstantLayerVisitor visitor(input, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddConstantLayer(input, layerName);
     layer->Accept(visitor);
@@ -719,7 +719,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -792,7 +792,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
@@ -883,7 +883,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -975,7 +975,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
@@ -1062,7 +1062,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1176,7 +1176,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1263,7 +1263,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
@@ -1350,7 +1350,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1437,7 +1437,7 @@
 
     TestLstmLayerVisitor visitor(descriptor, params, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
@@ -1509,7 +1509,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1582,7 +1582,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params, layerName);
     layer->Accept(visitor);
@@ -1677,7 +1677,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1794,7 +1794,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1884,7 +1884,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -1974,7 +1974,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -2097,7 +2097,7 @@
 
     TestQLstmLayerVisitor visitor(descriptor, params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQLstmLayer(descriptor, params);
     layer->Accept(visitor);
@@ -2187,7 +2187,7 @@
 
     TestQuantizedLstmLayerVisitor visitor(params);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params);
     layer->Accept(visitor);
@@ -2277,7 +2277,7 @@
 
     TestQuantizedLstmLayerVisitor visitor(params, layerName);
 
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer* const layer = net.AddQuantizedLstmLayer(params, layerName);
     layer->Accept(visitor);
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index c07bf6a..3ea2c35 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -1974,11 +1974,11 @@
 {
     IgnoreUnused(graph);
 
-    // To create a PreCompiled layer, create a network and Optimize it.
-    armnn::Network net;
+    // Build up the structure of the network
+    armnn::INetworkPtr net(armnn::INetwork::Create());
 
     // Add an input layer
-    armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
+    armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
     BOOST_TEST(inputLayer);
 
     // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
@@ -2021,7 +2021,7 @@
         armnn::ConstTensor biases(biasTensorInfo, biasData);
 
         // Create convolution layer with biases
-        convLayer = net.AddConvolution2dLayer(convDesc2d,
+        convLayer = net->AddConvolution2dLayer(convDesc2d,
                                               weights,
                                               Optional<ConstTensor>(biases),
                                               convLayerName.c_str());
@@ -2029,7 +2029,7 @@
     else
     {
         // Create convolution layer without biases
-        convLayer = net.AddConvolution2dLayer(convDesc2d,
+        convLayer = net->AddConvolution2dLayer(convDesc2d,
                                               weights,
                                               EmptyOptional(),
                                               convLayerName.c_str());
@@ -2038,7 +2038,7 @@
     BOOST_TEST(convLayer);
 
     // Add an output layer
-    armnn::IConnectableLayer* const outputLayer = net.AddOutputLayer(0, "output layer");
+    armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
     BOOST_TEST(outputLayer);
 
     // set the tensors in the network (NHWC format)
@@ -2068,12 +2068,12 @@
     armnn::IRuntime::CreationOptions options;
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
     armnn::OptimizerOptions optimizerOptions;
-    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                                optimizerOptions);
     BOOST_CHECK(optimizedNet != nullptr);
 
     // Find the PreCompiled layer in the optimised graph
-    armnn::Graph& optimisedGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
     Layer* preCompiledLayer = nullptr;
     for (auto& layer : optimisedGraph)
     {
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index ef270d9..692d64e 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -31,7 +31,7 @@
 
 BOOST_AUTO_TEST_CASE(LayerGuids)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
     armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
     armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
     armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
@@ -43,23 +43,22 @@
 
 BOOST_AUTO_TEST_CASE(NetworkBasic)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
     BOOST_TEST(net.PrintGraph() == armnn::Status::Success);
 }
 
 BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork)
 {
-    armnn::Network net;
-    armnn::INetwork& inet = net;
-    inet.AddInputLayer(0);
-    inet.AddAdditionLayer();
-    inet.AddActivationLayer(armnn::ActivationDescriptor());
-    inet.AddOutputLayer(0);
+    armnn::INetworkPtr inet(armnn::INetwork::Create());
+    inet->AddInputLayer(0);
+    inet->AddAdditionLayer();
+    inet->AddActivationLayer(armnn::ActivationDescriptor());
+    inet->AddOutputLayer(0);
 }
 
 BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
     net.AddInputLayer(0);
     net.AddAdditionLayer();
     net.AddActivationLayer(armnn::ActivationDescriptor());
@@ -68,7 +67,7 @@
 
 BOOST_AUTO_TEST_CASE(NetworkModification)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
 
     armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
     BOOST_TEST(inputLayer);
@@ -228,7 +227,7 @@
 
 BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
 
     // Adds an input layer and an input tensor descriptor.
     armnn::IConnectableLayer* inputLayer = net.AddInputLayer(0, "input layer");
@@ -285,7 +284,7 @@
 
 BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
 
     // Adds an input layer and an input tensor descriptor.
     armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
@@ -330,7 +329,7 @@
 
 BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
 {
-    armnn::Network net;
+    armnn::NetworkImpl net;
 
     // Adds an input layer and an input tensor descriptor.
     armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
@@ -476,7 +475,7 @@
 BOOST_AUTO_TEST_CASE(StandInLayerNetworkTest)
 {
     // Create a simple network with a StandIn some place in it.
-    armnn::Network net;
+    armnn::NetworkImpl net;
     auto input = net.AddInputLayer(0);
 
     // Add some valid layer.
@@ -509,7 +508,7 @@
 BOOST_AUTO_TEST_CASE(StandInLayerSingleInputMultipleOutputsNetworkTest)
 {
     // Another test with one input and two outputs on the StandIn layer.
-    armnn::Network net;
+    armnn::NetworkImpl net;
 
     // Create the input.
     auto input = net.AddInputLayer(0);
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index e7eab9d..fa860ab 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -756,12 +756,10 @@
     input->GetOutputSlot(0).Connect(act->GetInputSlot(0));
     act->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    auto optNet = IOptimizedNetworkPtr(new OptimizedNetwork(std::move(graph)), &IOptimizedNetwork::Destroy);
-
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+    OptimizedNetworkImpl optNet(std::move(graph));
 
     // Get the optimized graph
-    Graph& optGraph = optNetObjPtr->GetGraph();
+    Graph& optGraph = optNet.GetGraph();
 
     std::vector<BackendId> prefs{"MockBackend", "CustomBackend"};
 
@@ -773,6 +771,8 @@
     // Assign an available backend to each layer
     Graph::Iterator firstLayer = optGraph.begin();
     Graph::Iterator lastLayer  = optGraph.end();
+
+    OptimizedNetworkImpl* optNetObjPtr = &optNet;
     OptimizationResult res = AssignBackends(optNetObjPtr,
                                             backendSettings,
                                             firstLayer,
diff --git a/src/armnn/test/QuantizerTest.cpp b/src/armnn/test/QuantizerTest.cpp
index 67d0f95..a932698 100644
--- a/src/armnn/test/QuantizerTest.cpp
+++ b/src/armnn/test/QuantizerTest.cpp
@@ -33,6 +33,70 @@
 const float g_SymmS16QuantizationBase = 32767.0f;
 const float g_TestTolerance = 0.000001f;
 
+class TestConnectionPreservation : public LayerVisitorBase<VisitorNoThrowPolicy>
+{
+public:
+    TestConnectionPreservation(INetwork* network)
+        : LayerVisitorBase<VisitorNoThrowPolicy>()
+        , m_Network(network)
+    {}
+
+    void VisitAdditionLayer(const IConnectableLayer* layer, const char*) override
+    {
+        CheckLayerName(layer->GetInputSlot(0).GetConnection()->GetOwningLayerGuid(), "reLU1");
+        CheckLayerName(layer->GetInputSlot(1).GetConnection()->GetOwningLayerGuid(), "reLU2");
+    }
+
+    void CheckLayerName(LayerGuid guid, std::string expectedName)
+    {
+        auto graph = m_Network->pNetworkImpl->GetGraph();
+        bool guidFound = false;
+        for (Layer* layer : graph)
+        {
+            if (layer->GetGuid() == guid)
+            {
+                BOOST_CHECK_EQUAL(layer->GetName(), expectedName.c_str());
+                guidFound = true;
+                break;
+            }
+        }
+        if (!guidFound)
+        {
+            BOOST_FAIL("No layer matching the GUID was found");
+        }
+    }
+
+private:
+    INetwork* m_Network;
+};
+
+void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& visitor)
+{
+    auto graph = inputNetwork->pNetworkImpl->GetGraph().TopologicalSort();
+
+    ApplyStrategyToLayers(graph, visitor);
+}
+
+TensorInfo GetInputTensorInfo(const INetwork* network)
+{
+    for (auto&& inputLayer : network->pNetworkImpl->GetGraph().GetInputLayers())
+    {
+        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+        return inputLayer->GetOutputSlot(0).GetTensorInfo();
+    }
+    throw InvalidArgumentException("Network has no input layers");
+}
+
+TensorInfo GetInputTensorInfo(const NetworkImpl* network)
+{
+    for (auto&& inputLayer : network->GetGraph().GetInputLayers())
+    {
+        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+        return inputLayer->GetOutputSlot(0).GetTensorInfo();
+    }
+    throw InvalidArgumentException("Network has no input layers");
+}
+
 BOOST_AUTO_TEST_SUITE(Quantizer)
 
 class TestQuantization : public IStrategy
@@ -473,14 +537,6 @@
     QuantizerOptions m_QuantizerOptions;
 };
 
-void VisitLayersTopologically(const INetwork* inputNetwork, IStrategy& strategy)
-{
-    auto network = PolymorphicDowncast<const Network*>(inputNetwork);
-    auto graph = network->GetGraph().TopologicalSort();
-
-    ApplyStrategyToLayers(graph, strategy);
-}
-
 void TestNetwork(INetwork* network, const TensorShape inShape, const TensorShape outShape)
 {
     const QuantizerOptions qAsymmU8Options(DataType::QAsymmU8);
@@ -596,21 +652,11 @@
     return network;
 }
 
-TensorInfo GetInputTensorInfo(const Network* network)
-{
-    for (auto&& inputLayer : network->GetGraph().GetInputLayers())
-    {
-        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
-        return inputLayer->GetOutputSlot(0).GetTensorInfo();
-    }
-    throw InvalidArgumentException("Network has no input layers");
-}
-
 BOOST_AUTO_TEST_CASE(InputOutputLayerDynamicQuant)
 {
     INetworkPtr network = CreateNetworkWithInputOutputLayers();
 
-    armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast<const Network*>(network.get()));
+    armnn::TensorInfo tensorInfo = GetInputTensorInfo(network.get());
 
     // Outliers -56 and 98
     std::vector<float> inputData({0, 0, 0, -56, 98, 0, 0, 0});
@@ -870,7 +916,7 @@
     RangeTracker ranges;
     RangeTracker::MinMaxRange minMaxRange(-12.3f, 45.6f); // Range to use for the override
 
-    Network network; // Empty network
+    NetworkImpl network; // Empty network
     auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
 
     OverrideInputRangeStrategy overrideInputRangeStrategy(ranges, 0, minMaxRange);
@@ -884,7 +930,7 @@
     RangeTracker ranges;
     MinMaxRange minMaxRange(-12.3f, 45.6f); // Range to use for the override
 
-    Network network;
+    NetworkImpl network;
     network.AddAdditionLayer(); // Network with no input layers
     auto inputLayers = network.GetGraph().GetInputLayers(); // Empty list of input layers
 
@@ -899,7 +945,7 @@
     RangeTracker ranges;
     MinMaxRange minMaxRange(-12.3f, 45.6f); // Range to use for the override
 
-    Network network;
+    NetworkImpl network;
 
     // Adding the layers
     IConnectableLayer* input0 = network.AddInputLayer(0);
@@ -2117,16 +2163,25 @@
         Graph m_Graph;
     };
 
-    INetworkPtr network = INetwork::Create();
+    class TestNetwork : public INetwork
+    {
+    public :
+        NetworkImpl* GetPNetworkImpl()
+        {
+            return  pNetworkImpl.get();
+        }
+    };
 
-    IConnectableLayer* inputLayer =  network->AddInputLayer(0,"inputLayer1");
+    TestNetwork testNetwork;
+
+    IConnectableLayer* inputLayer =  testNetwork.AddInputLayer(0,"inputLayer1");
     armnn::ActivationDescriptor ReLUDesc;
     ReLUDesc.m_Function = ActivationFunction::ReLu;
 
-    IConnectableLayer* reLULayer1 = network->AddActivationLayer(ReLUDesc, "reLU1");
-    IConnectableLayer* reLULayer2 = network->AddActivationLayer(ReLUDesc, "reLU2");
-    IConnectableLayer* addLayer1 = network->AddAdditionLayer("addLayer1");
-    IConnectableLayer* outputLayer = network->AddOutputLayer(0,"outPutLayer1");
+    IConnectableLayer* reLULayer1 = testNetwork.AddActivationLayer(ReLUDesc, "reLU1");
+    IConnectableLayer* reLULayer2 = testNetwork.AddActivationLayer(ReLUDesc, "reLU2");
+    IConnectableLayer* addLayer1 = testNetwork.AddAdditionLayer("addLayer1");
+    IConnectableLayer* outputLayer = testNetwork.AddOutputLayer(0,"outPutLayer1");
 
     inputLayer->GetOutputSlot(0).Connect(reLULayer1->GetInputSlot(0));
     reLULayer1->GetOutputSlot(0).Connect(reLULayer2->GetInputSlot(0));
@@ -2139,12 +2194,12 @@
     reLULayer2->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32));
     addLayer1->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape({1, 2, 2, 1}), DataType::Float32));
 
-    TestConnectionPreservation strategy1(PolymorphicDowncast<const Network*>(network.get())->GetGraph());
-    VisitLayersTopologically(network.get(), strategy1);
+    TestConnectionPreservation strategy1(testNetwork.GetPNetworkImpl()->GetGraph());
+    VisitLayersTopologically(&testNetwork, strategy1);
 
-    armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(network.get());
+    armnn::INetworkQuantizerPtr quantizer = armnn::INetworkQuantizer::Create(&testNetwork);
 
-    armnn::TensorInfo tensorInfo = GetInputTensorInfo(PolymorphicDowncast<const Network*>(network.get()));
+    armnn::TensorInfo tensorInfo = GetInputTensorInfo(&testNetwork);
 
     std::vector<float> inputData({0, 2, 0, 4});
     armnn::ConstTensor inputTensor(tensorInfo, inputData.data());
@@ -2155,7 +2210,9 @@
 
     INetworkPtr quantNetwork = quantizer->ExportNetwork();
 
-    TestConnectionPreservation strategy2(PolymorphicDowncast<const Network*>(quantNetwork.get())->GetGraph());
+    TestNetwork* testQuantNetwork = static_cast<TestNetwork*>(quantNetwork.get());
+
+    TestConnectionPreservation strategy2(testQuantNetwork->GetPNetworkImpl()->GetGraph());
     VisitLayersTopologically(quantNetwork.get(), strategy2);
 }
 
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index 1d5960b..c5457d0 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -135,7 +135,7 @@
     {
         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
 
-        std::unique_ptr<armnn::Network> mockNetwork1 = std::make_unique<armnn::Network>();
+        armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
         mockNetwork1->AddInputLayer(0, "test layer");
 
         // Warm-up load/unload pair to put the runtime in a stable state (memory-wise).
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 35ffc55..6563517 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -14,7 +14,7 @@
 {
     const char* layerName = "InputLayer";
     TestInputLayerVisitor visitor(1, layerName);
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddInputLayer(1, layerName);
     layer->Accept(visitor);
@@ -23,7 +23,7 @@
 BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndNameNull)
 {
     TestInputLayerVisitor visitor(1);
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddInputLayer(1);
     layer->Accept(visitor);
@@ -33,7 +33,7 @@
 {
     const char* layerName = "OutputLayer";
     TestOutputLayerVisitor visitor(1, layerName);
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddOutputLayer(1, layerName);
     layer->Accept(visitor);
@@ -42,7 +42,7 @@
 BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndNameNull)
 {
     TestOutputLayerVisitor visitor(1);
-    Network net;
+    NetworkImpl net;
 
     IConnectableLayer *const layer = net.AddOutputLayer(1);
     layer->Accept(visitor);
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 7d4dcaa..39e2543 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -16,7 +16,7 @@
     const char* layerName = "name##Layer"; \
     armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
     Test##name##LayerVisitor visitor(descriptor, layerName); \
-    armnn::Network net; \
+    armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor, layerName); \
     layer->Accept(visitor); \
 }
@@ -26,7 +26,7 @@
 { \
     armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
     Test##name##LayerVisitor visitor(descriptor); \
-    armnn::Network net; \
+    armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer(descriptor); \
     layer->Accept(visitor); \
 }
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 994375d..971d7ee 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -16,7 +16,7 @@
 BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
 { \
     Test##name##LayerVisitor visitor("name##Layer"); \
-    armnn::Network net; \
+    armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer("name##Layer"); \
     layer->Accept(visitor); \
 }
@@ -25,7 +25,7 @@
 BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptr) \
 { \
     Test##name##LayerVisitor visitor; \
-    armnn::Network net; \
+    armnn::NetworkImpl net; \
     armnn::IConnectableLayer *const layer = net.Add##name##Layer(); \
     layer->Accept(visitor); \
 }
diff --git a/src/armnn/test/TestUtils.cpp b/src/armnn/test/TestUtils.cpp
index 440d4e0..6020c76 100644
--- a/src/armnn/test/TestUtils.cpp
+++ b/src/armnn/test/TestUtils.cpp
@@ -22,6 +22,16 @@
 namespace armnn
 {
 
+Graph& GetGraphForTesting(IOptimizedNetwork* optNet)
+{
+    return optNet->pOptimizedNetworkImpl->GetGraph();
+}
+
+ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNet)
+{
+    return optNet->pOptimizedNetworkImpl->GetModelOptions();
+}
+
 profiling::ProfilingService& GetProfilingService(armnn::RuntimeImpl* runtime)
 {
     return runtime->m_ProfilingService;
diff --git a/src/armnn/test/TestUtils.hpp b/src/armnn/test/TestUtils.hpp
index bf222b3..fa9156b 100644
--- a/src/armnn/test/TestUtils.hpp
+++ b/src/armnn/test/TestUtils.hpp
@@ -51,7 +51,8 @@
 
 namespace armnn
 {
-
+Graph& GetGraphForTesting(IOptimizedNetwork* optNetPtr);
+ModelOptions& GetModelOptionsForTesting(IOptimizedNetwork* optNetPtr);
 profiling::ProfilingService& GetProfilingService(RuntimeImpl* runtime);
 
 } // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index c8adea2..71a554b 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -345,7 +345,7 @@
     // Optimise ArmNN network
     IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
 
-    Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
+    Graph& graphFused = GetGraphForTesting(optNetFused.get());
 
     auto checkFusedConv2d = [](const Layer* const layer)->bool {
         return IsLayerOfType<LayerType>(layer) &&
@@ -386,7 +386,7 @@
     // Optimise ArmNN network
     IOptimizedNetworkPtr optNetNotFused = Optimize(*networkNotFused, {backendId}, runNotFused->GetDeviceSpec());
 
-    Graph graphNotFused = PolymorphicDowncast<OptimizedNetwork*>(optNetNotFused.get())->GetGraph();
+    Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
 
     BOOST_CHECK(5 == graphNotFused.GetNumLayers());
     BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
@@ -443,8 +443,6 @@
         // Optimise ArmNN network
         IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
 
-        Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
-
         // Load network into runtime
         NetworkId networkIdentifier;
         BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index bf47c57..be66c5e 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -186,7 +186,7 @@
     // Optimise ArmNN network
     IOptimizedNetworkPtr optNetFused = Optimize(*networkFused, {backendId}, run->GetDeviceSpec());
 
-    Graph graphFused = PolymorphicDowncast<OptimizedNetwork*>(optNetFused.get())->GetGraph();
+    Graph& graphFused = GetGraphForTesting(optNetFused.get());
 
     auto checkFusedConv2d = [ ](const armnn::Layer* const layer) -> bool
     {
@@ -233,7 +233,7 @@
     // Optimise ArmNN network
     IOptimizedNetworkPtr optNetNotFused = Optimize(*networkNotFused, {backendId}, runNotFused->GetDeviceSpec());
 
-    Graph graphNotFused = PolymorphicDowncast<OptimizedNetwork*>(optNetNotFused.get())->GetGraph();
+    Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
 
     BOOST_CHECK(5 == graphNotFused.GetNumLayers());
     BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index 6bfd7e3..b47e3c7 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -50,6 +50,36 @@
 }
 
 /// Shared function for the below tests, so that we test the same network in both cases.
+std::unique_ptr<NetworkImpl> CreateTestNetworkImpl()
+{
+    std::unique_ptr<NetworkImpl> network(new NetworkImpl());
+
+    auto input = network->AddInputLayer(0, "input");
+    const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32);
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    // Insert Permute which swaps batches and channels dimensions
+    auto permute = network->AddPermuteLayer(PermuteDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute");
+    const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32);
+    permute->GetOutputSlot(0).SetTensorInfo(permuteInfo);
+    input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
+
+    // Insert BatchToSpace
+    BatchToSpaceNdDescriptor batchToSpaceDesc;
+    batchToSpaceDesc.m_BlockShape = { 2, 2 };
+    batchToSpaceDesc.m_DataLayout = DataLayout::NHWC;
+    auto batchToSpace             = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace");
+    const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32);
+    batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo);
+    permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0));
+
+    auto output = network->AddOutputLayer(0, "output");
+    batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    return network;
+}
+
+/// Shared function for the below tests, so that we test the same network in both cases.
 INetworkPtr CreateTransposeTestNetwork()
 {
     // Create a network
@@ -80,14 +110,45 @@
     return network;
 }
 
+/// Shared function for the below tests, so that we test the same network in both cases.
+std::unique_ptr<NetworkImpl> CreateTransposeTestNetworkImpl()
+{
+    // Create a network
+    std::unique_ptr<NetworkImpl> network(new NetworkImpl());
+
+    auto input = network->AddInputLayer(0, "input");
+    const TensorInfo inputInfo({ 1, 2, 3, 4 }, DataType::Float32);
+    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
+
+    // Insert Permute which swaps batches and channels dimensions
+    auto permute = network->AddTransposeLayer(TransposeDescriptor(PermutationVector{ 3, 1, 2, 0 }), "permute");
+    const TensorInfo permuteInfo({ 4, 2, 3, 1 }, DataType::Float32);
+    permute->GetOutputSlot(0).SetTensorInfo(permuteInfo);
+    input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
+
+    // Insert BatchToSpace
+    BatchToSpaceNdDescriptor batchToSpaceDesc;
+    batchToSpaceDesc.m_BlockShape = { 2, 2 };
+    batchToSpaceDesc.m_DataLayout = DataLayout::NHWC;
+    auto batchToSpace             = network->AddBatchToSpaceNdLayer(batchToSpaceDesc, "batchToSpace");
+    const TensorInfo batchToSpaceInfo({ 1, 4, 6, 1 }, DataType::Float32);
+    batchToSpace->GetOutputSlot(0).SetTensorInfo(batchToSpaceInfo);
+    permute->GetOutputSlot(0).Connect(batchToSpace->GetInputSlot(0));
+
+    auto output = network->AddOutputLayer(0, "output");
+    batchToSpace->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    return network;
+}
+
 }    // namespace
 
 /// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
 /// Note this does not ensure the correctness of the optimization - that is done in the below test.
 BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
 {
-    INetworkPtr network = CreateTestNetwork();
-    Graph graph         = static_cast<Network*>(network.get())->GetGraph();
+    std::unique_ptr<NetworkImpl> network = CreateTestNetworkImpl();
+    Graph graph         = network.get()->GetGraph();
 
     // Confirm initial graph is as we expect
     BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
@@ -116,8 +177,8 @@
 /// Note this does not ensure the correctness of the optimization - that is done in the below test.
 BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
 {
-    INetworkPtr network = CreateTransposeTestNetwork();
-    Graph graph         = static_cast<Network*>(network.get())->GetGraph();
+    std::unique_ptr<NetworkImpl> network = CreateTransposeTestNetworkImpl();
+    Graph graph         = network.get()->GetGraph();
 
     // Confirm initial graph is as we expect
     BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
@@ -155,7 +216,7 @@
     IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());
 
     // Confirm that the optimization has actually taken place
-    const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph();
+    const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
     BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
 
@@ -202,7 +263,7 @@
     IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, { Compute::CpuRef }, runtime->GetDeviceSpec());
 
     // Confirm that the optimization has actually taken place
-    const Graph& optGraph = static_cast<OptimizedNetwork*>(optimizedNetwork.get())->GetGraph();
+    const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
     BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
 
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index cb4173a..304520c 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -241,8 +241,7 @@
 
     auto optimized = Optimize(*network, { armnn::Compute::CpuRef }, m_Runtime->GetDeviceSpec());
 
-    auto optimizedNetwork = armnn::PolymorphicDowncast<armnn::OptimizedNetwork*>(optimized.get());
-    auto graph = optimizedNetwork->GetGraph();
+    armnn::Graph& graph = GetGraphForTesting(optimized.get());
 
     // Check the number of layers in the graph
     BOOST_TEST((graph.GetNumInputs() == 2));
diff --git a/src/armnnTfParser/test/Assert.cpp b/src/armnnTfParser/test/Assert.cpp
index b978f02..0665be7 100644
--- a/src/armnnTfParser/test/Assert.cpp
+++ b/src/armnnTfParser/test/Assert.cpp
@@ -102,8 +102,7 @@
 {
     auto optimized = SetupOptimizedNetwork({ { "Placeholder", { 1, 1, 1, 4 } } }, { "Add" });
 
-    auto optimizedNetwork = armnn::PolymorphicDowncast<armnn::OptimizedNetwork*>(optimized.get());
-    auto graph = optimizedNetwork->GetGraph();
+    armnn::Graph& graph = GetGraphForTesting(optimized.get());
 
     BOOST_TEST((graph.GetNumInputs() == 1));
     BOOST_TEST((graph.GetNumOutputs() == 1));
@@ -258,8 +257,7 @@
                                              { "Input1", { 1, 1, 2, 2 } } },
                                            { "Output" });
 
-    auto optimizedNetwork = armnn::PolymorphicDowncast<armnn::OptimizedNetwork*>(optimized.get());
-    auto graph = optimizedNetwork->GetGraph();
+    armnn::Graph& graph = GetGraphForTesting(optimized.get());
 
     BOOST_TEST((graph.GetNumInputs() == 2));
     BOOST_TEST((graph.GetNumOutputs() == 1));
diff --git a/src/backends/backendsCommon/WorkloadFactory.hpp b/src/backends/backendsCommon/WorkloadFactory.hpp
index 6ab6d2c..13fd190 100644
--- a/src/backends/backendsCommon/WorkloadFactory.hpp
+++ b/src/backends/backendsCommon/WorkloadFactory.hpp
@@ -7,6 +7,7 @@
 #include <armnn/TensorFwd.hpp>
 #include <armnn/Optional.hpp>
 #include <armnn/backends/ITensorHandle.hpp>
+#include <armnn/INetwork.hpp>
 
 #include <backendsCommon/Workload.hpp>
 
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index c972b4b..b472a03 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -212,8 +212,8 @@
     BOOST_CHECK(optNet);
 
     // Check the optimised graph
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    CheckLayers(optNetObjPtr->GetGraph());
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    CheckLayers(graph);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 721dfb0..66d166f 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -15,12 +15,13 @@
 
 BOOST_AUTO_TEST_CASE(SerializeToDot)
 {
-    armnn::Network net;
+    // build up the structure of the network
+    armnn::INetworkPtr net(armnn::INetwork::Create());
 
     //Defines layers.
-    auto input = net.AddInputLayer(0);
-    auto add = net.AddAdditionLayer();
-    auto output = net.AddOutputLayer(0);
+    auto input = net->AddInputLayer(0);
+    auto add = net->AddAdditionLayer();
+    auto output = net->AddOutputLayer(0);
 
     // Connects layers.
     input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
@@ -36,7 +37,7 @@
     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
 
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
 
     std::ostringstream ss;
     optimizedNet->SerializeToDot(ss);
@@ -127,7 +128,10 @@
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_REQUIRE(optNet);
 
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    graph.AllocateDynamicBuffers();
+
+    for (auto&& layer : graph)
     {
         // If NEON is enabled, Input and Output layers are supported by CpuAcc,
         // the other layers are supported by CpuRef.
@@ -151,7 +155,8 @@
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
-    armnn::Network  net;
+    // build up the structure of the network
+    armnn::INetworkPtr net(armnn::INetwork::Create());
 
     armnn::NormalizationDescriptor nmDesc;
     armnn::ActivationDescriptor acDesc;
@@ -167,21 +172,21 @@
     //    sm
     //     |
     //    ot
-    armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+    armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
-    armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+    armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
 
     layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).SetTensorInfo(desc);
 
-    layer = net.AddActivationLayer(acDesc, "ac");
+    layer = net->AddActivationLayer(acDesc, "ac");
 
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net.AddMultiplicationLayer("ml");
+    layer = net->AddMultiplicationLayer("ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -189,13 +194,13 @@
 
     prevLayer = layer;
     armnn::SoftmaxDescriptor softmaxDescriptor;
-    layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+    layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     prevLayer = layer;
-    layer = net.AddOutputLayer(0, "ot");
+    layer = net->AddOutputLayer(0, "ot");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
 
@@ -207,7 +212,7 @@
 
     try
     {
-        Optimize(net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
+        Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
         BOOST_FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
@@ -221,7 +226,8 @@
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
-    armnn::Network  net;
+    // build up the structure of the network
+    armnn::INetworkPtr net(armnn::INetwork::Create());
 
     armnn::NormalizationDescriptor nmDesc;
     armnn::ActivationDescriptor acDesc;
@@ -237,21 +243,21 @@
     //    sm
     //     |
     //    ot
-    armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+    armnn::IConnectableLayer* layer = net->AddInputLayer(0, "in");
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
-    armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+    armnn::IConnectableLayer* const normLayer = net->AddNormalizationLayer(nmDesc, "nm");
 
     layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).SetTensorInfo(desc);
 
-    layer = net.AddActivationLayer(acDesc, "ac");
+    layer = net->AddActivationLayer(acDesc, "ac");
 
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     armnn::IConnectableLayer* prevLayer = layer;
-    layer = net.AddMultiplicationLayer("ml");
+    layer = net->AddMultiplicationLayer("ml");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -259,13 +265,13 @@
 
     prevLayer = layer;
     armnn::SoftmaxDescriptor softmaxDescriptor;
-    layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+    layer = net->AddSoftmaxLayer(softmaxDescriptor, "sm");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     layer->GetOutputSlot(0).SetTensorInfo(desc);
 
     prevLayer = layer;
-    layer = net.AddOutputLayer(0, "ot");
+    layer = net->AddOutputLayer(0, "ot");
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
 
@@ -274,12 +280,15 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
 
-    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_CHECK(optNet);
 
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    graph.AllocateDynamicBuffers();
+
     // validate workloads
     armnn::RefWorkloadFactory fact;
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    for (auto&& layer : graph)
     {
         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         BOOST_CHECK_NO_THROW(
@@ -316,7 +325,10 @@
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     BOOST_REQUIRE(optNet);
 
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    graph.AllocateDynamicBuffers();
+
+    for (auto&& layer : graph)
     {
         // If NEON is enabled, Input and Output layers are supported by CpuAcc,
         // the other layers are supported by CpuRef.
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 5885cbe..4384ae5 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -51,8 +51,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -177,8 +176,7 @@
     OptimizerOptions optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -307,8 +305,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -445,8 +442,7 @@
     OptimizerOptions optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index dddc5aa..a41c5f8 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -39,7 +39,9 @@
     // validate workloads
     armnn::ClWorkloadFactory fact =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+
+    const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
+    for (auto&& layer : theGraph)
     {
         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
         BOOST_CHECK_NO_THROW(
@@ -59,17 +61,17 @@
     // if there are inverse conversion layers remove them with optimization
     // at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
     // and inverse conversion layers are removed by the optimizer
-    armnn::Network net;
+    armnn::INetworkPtr net(armnn::INetwork::Create());
 
     // Defines layers.
-    auto input = net.AddInputLayer(0, "input layer");
+    auto input = net->AddInputLayer(0, "input layer");
     // ReLu1
     armnn::ActivationDescriptor activation1Descriptor;
     activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
     activation1Descriptor.m_A = 1.f;
     activation1Descriptor.m_B = -1.f;
-    auto activation = net.AddActivationLayer(activation1Descriptor, "activation layer");
-    auto output = net.AddOutputLayer(0, "output layer");
+    auto activation = net->AddActivationLayer(activation1Descriptor, "activation layer");
+    auto output = net->AddOutputLayer(0, "output layer");
 
     // Connects layers.
     input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
@@ -89,9 +91,9 @@
     optimizerOptions.m_ReduceFp32ToFp16 = true;
 
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
-            net, backends, runtime->GetDeviceSpec(), optimizerOptions);
+            *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
 
     // Tests that all layers are present in the graph.
     BOOST_TEST(graph.GetNumLayers() == 5);
@@ -127,7 +129,7 @@
 
     BOOST_CHECK(optimizedNet);
 
-    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+    auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
     BOOST_TEST(modelOptionsOut.size() == 1);
     BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index fd7fbbc..2d70cc2 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -62,8 +62,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -200,8 +199,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -331,8 +329,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -469,8 +466,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -598,8 +594,7 @@
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -723,8 +718,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -849,8 +843,7 @@
     OptimizerOptions optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -979,8 +972,7 @@
     optOptions.m_ImportEnabled = true;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
@@ -1121,8 +1113,7 @@
     OptimizerOptions optOptions;
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
-    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
-    Graph& graph = optNetObjPtr->GetGraph();
+    Graph& graph = GetGraphForTesting(optNet.get());
 
     armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
     armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 85f0617..4944c31 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -35,7 +35,8 @@
     armnn::NeonWorkloadFactory fact =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
 
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    for (auto&& layer : graph)
     {
         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
         BOOST_CHECK_NO_THROW(
@@ -103,7 +104,7 @@
 
     BOOST_CHECK(optimizedNet);
 
-    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+    auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
     BOOST_TEST(modelOptionsOut.size() == 1);
     BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
@@ -134,8 +135,10 @@
             *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
     BOOST_CHECK(optimizedNet);
+    std::unique_ptr<armnn::Graph> graphPtr;
+    armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
 
-    auto modelOptionsOut = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetModelOptions();
+    auto modelOptionsOut = impl.GetModelOptions();
 
     BOOST_TEST(modelOptionsOut.size() == 1);
     BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index e6d7402..0e24e95 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -128,7 +128,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
 
     // Load graph into runtime
     armnn::NetworkId networkIdentifier;
@@ -211,7 +211,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
 
     // Load graph into runtime
     armnn::NetworkId networkIdentifier;
@@ -380,7 +380,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
 
     // Load graph into runtime
     armnn::NetworkId networkIdentifier;
@@ -555,7 +555,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    const armnn::Graph& theGraph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    const armnn::Graph& theGraph = GetGraphForTesting(optimizedNet.get());
 
     // Load graph into runtime
     armnn::NetworkId networkIdentifier;
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 16ff202..086c1e4 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -71,12 +71,13 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    graph.AllocateDynamicBuffers();
     BOOST_CHECK(optNet);
 
     // Validates workloads.
     armnn::RefWorkloadFactory fact;
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    for (auto&& layer : graph)
     {
         BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
     }
@@ -109,7 +110,10 @@
     // optimize the network
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    graph.AllocateDynamicBuffers();
+
+    for (auto&& layer : graph)
     {
         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
     }
@@ -141,8 +145,9 @@
 
     // optimize the network
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-
-    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    armnn::Graph& graph = GetGraphForTesting(optNet.get());
+    graph.AllocateDynamicBuffers();
+    for (auto&& layer : graph)
     {
         BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
     }
@@ -183,7 +188,9 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                                optimizerOptions);
 
-    const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
+    armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
+    graph.AllocateDynamicBuffers();
+
     // Tests that all layers are present in the graph.
     BOOST_TEST(graph.GetNumLayers() == 5);