Release 18.05.02
diff --git a/ModelToINetworkConverter.hpp b/ModelToINetworkConverter.hpp
index 7ced514..864a2fc 100644
--- a/ModelToINetworkConverter.hpp
+++ b/ModelToINetworkConverter.hpp
@@ -9,6 +9,8 @@
 #include "NeuralNetworks.h"
 #include "ActivationFunctor.h"
 
+#include "ArmnnDriver.hpp"
+
 #include <armnn/ArmNN.hpp>
 #include <armnn/INetwork.hpp>
 #include <CpuExecutor.h>
@@ -37,7 +39,7 @@
 class ModelToINetworkConverter
 {
 public:
-    ModelToINetworkConverter(armnn::Compute compute, const Model& model,
+    ModelToINetworkConverter(armnn::Compute compute, const V1_0::Model& model,
         const std::set<unsigned int>& forcedUnsupportedOperations);
 
     ConversionResult GetConversionResult() const { return m_ConversionResult; }
@@ -50,76 +52,76 @@
 private:
     void Convert();
 
-    bool ConvertOperation(const Operation& operation);
+    bool ConvertOperation(const V1_0::Operation& operation);
 
-    bool ConvertAdd(const Operation& operation);
+    bool ConvertAdd(const V1_0::Operation& operation);
 
-    bool ConvertAveragePool2d(const Operation& operation);
+    bool ConvertAveragePool2d(const V1_0::Operation& operation);
 
-    bool ConvertConcatenation(const Operation& operation);
+    bool ConvertConcatenation(const V1_0::Operation& operation);
 
-    bool ConvertConv2d(const Operation& operation);
+    bool ConvertConv2d(const V1_0::Operation& operation);
 
-    bool ConvertDepthwiseConv2d(const Operation& operation);
+    bool ConvertDepthwiseConv2d(const V1_0::Operation& operation);
 
-    bool ConvertFloor(const Operation& operation);
+    bool ConvertFloor(const V1_0::Operation& operation);
 
-    bool ConvertFullyConnected(const Operation& operation);
+    bool ConvertFullyConnected(const V1_0::Operation& operation);
 
-    bool ConvertLogistic(const Operation& operation);
+    bool ConvertLogistic(const V1_0::Operation& operation);
 
-    bool ConvertLocalResponseNormalization(const Operation& operation);
+    bool ConvertLocalResponseNormalization(const V1_0::Operation& operation);
 
-    bool ConvertL2Normalization(const Operation& operation);
+    bool ConvertL2Normalization(const V1_0::Operation& operation);
 
-    bool ConvertL2Pool2d(const Operation& operation);
+    bool ConvertL2Pool2d(const V1_0::Operation& operation);
 
-    bool ConvertMaxPool2d(const Operation& operation);
+    bool ConvertMaxPool2d(const V1_0::Operation& operation);
 
-    bool ConvertMul(const Operation& operation);
+    bool ConvertMul(const V1_0::Operation& operation);
 
-    bool ConvertReLu(const Operation& operation);
+    bool ConvertReLu(const V1_0::Operation& operation);
 
-    bool ConvertReLu1(const Operation& operation);
+    bool ConvertReLu1(const V1_0::Operation& operation);
 
-    bool ConvertReLu6(const Operation& operation);
+    bool ConvertReLu6(const V1_0::Operation& operation);
 
-    bool ConvertSoftmax(const Operation& operation);
+    bool ConvertSoftmax(const V1_0::Operation& operation);
 
-    bool ConvertTanH(const Operation& operation);
+    bool ConvertTanH(const V1_0::Operation& operation);
 
-    bool ConvertReshape(const Operation& operation);
+    bool ConvertReshape(const V1_0::Operation& operation);
 
-    bool ConvertResizeBilinear(const Operation& operation);
+    bool ConvertResizeBilinear(const V1_0::Operation& operation);
 
-    bool ConvertToActivation(const Operation& operation, const char* operationName,
+    bool ConvertToActivation(const V1_0::Operation& operation, const char* operationName,
         const armnn::ActivationDescriptor& activationDesc);
 
-    bool ConvertPooling2d(const Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
+    bool ConvertPooling2d(const V1_0::Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);
 
 
     const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;
 
-    const Operand* GetInputOperand(const Operation& operation, uint32_t inputIndex) const;
+    const Operand* GetInputOperand(const V1_0::Operation& operation, uint32_t inputIndex) const;
 
-    const Operand* GetOutputOperand(const Operation& operation, uint32_t outputIndex) const;
+    const Operand* GetOutputOperand(const V1_0::Operation& operation, uint32_t outputIndex) const;
 
     template<typename T>
-    bool GetInputScalar(const Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
+    bool GetInputScalar(const V1_0::Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;
 
-    bool GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
+    bool GetInputInt32(const V1_0::Operation& operation, uint32_t inputIndex, int32_t& outValue) const;
 
-    bool GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const;
+    bool GetInputFloat32(const V1_0::Operation& operation, uint32_t inputIndex, float& outValue) const;
 
-    bool GetInputActivationFunction(const Operation& operation, uint32_t inputIndex,
+    bool GetInputActivationFunction(const V1_0::Operation& operation, uint32_t inputIndex,
         ActivationFn& outActivationFunction) const;
 
-    bool GetInputPaddingScheme(const Operation& operation, uint32_t inputIndex,
+    bool GetInputPaddingScheme(const V1_0::Operation& operation, uint32_t inputIndex,
         android::nn::PaddingScheme& outPaddingScheme) const;
 
-    LayerInputHandle ConvertToLayerInputHandle(const Operation& operation, uint32_t inputIndex);
+    LayerInputHandle ConvertToLayerInputHandle(const V1_0::Operation& operation, uint32_t inputIndex);
 
-    ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation& operation, uint32_t inputIndex,
+    ConstTensorPin ConvertOperationInputToConstTensorPin(const V1_0::Operation& operation, uint32_t inputIndex,
         const armnn::PermutationVector& dimensionMappings = g_DontPermute,
         const armnn::TensorShape* overrideTensorShape = nullptr);
 
@@ -134,13 +136,13 @@
                                                 armnn::IConnectableLayer* prevLayer);
 
 
-    bool SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
+    bool SetupAndTrackLayerOutputSlot(const V1_0::Operation& operation, uint32_t outputIndex,
                                       armnn::IConnectableLayer& layer);
 
 
     // Input data
     armnn::Compute                    m_Compute;
-    const Model&                      m_Model;
+    const V1_0::Model&                m_Model;
     const std::set<unsigned int>&     m_ForcedUnsupportedOperations;
 
     // Output data
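
The substance of this change is mechanical: every unqualified Operation/Model parameter becomes V1_0::Operation/V1_0::Model, and ArmnnDriver.hpp is now included so that the V1_0 name is visible where these signatures are declared. The sketch below illustrates the idea only; the include path, namespace, and alias are assumptions for illustration, not copied from this diff or from the real ArmnnDriver.hpp.

    // Sketch: why qualifying the NN HAL types as V1_0::Model / V1_0::Operation
    // requires something (here, ArmnnDriver.hpp or a header it pulls in) to put
    // a V1_0 namespace or alias in scope. Path and alias below are assumed.
    #include <android/hardware/neuralnetworks/1.0/types.h>  // HIDL-generated 1.0 types (assumed path)

    namespace armnn_driver
    {
    // Assumed alias; the real header may achieve the same effect differently.
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    // Converter signatures can then state exactly which HAL revision they accept:
    bool ConvertOperation(const V1_0::Operation& operation);
    }

Spelling out the HAL revision in each signature keeps the converter unambiguous if a later revision (for example a V1_1 Model or Operation) is added to the driver alongside the 1.0 types.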