Revert "IVGCVSW-6873 Import inputs but don't export outputs fails."

This reverts commit 03bf98a8bc51ad20eef4b9ca5fbf6ce15e063721.

Reason for revert: Caused failures in tests located in the internal repo.

Change-Id: If35cb0ede349b270e4e7827324382e09455d8cfa
diff --git a/python/pyarmnn/src/pyarmnn/_version.py b/python/pyarmnn/src/pyarmnn/_version.py
index d1b1ca2..7c0940e 100644
--- a/python/pyarmnn/src/pyarmnn/_version.py
+++ b/python/pyarmnn/src/pyarmnn/_version.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 import os
 
-version_info = (30, 0, 0)
+version_info = (29, 0, 0)
 
 __dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
 
@@ -24,7 +24,7 @@
     """Compares expected Arm NN version and Arm NN version used to build the package.
 
     Args:
-        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 30.0.0)
+        installed_armnn_version (str): Arm NN version used to generate the package (e.g. 29.0.0)
         expected_armnn_version (str): Expected Arm NN version
 
     Returns:
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
index 55b6795..a2f57a3 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_network.i
@@ -29,7 +29,7 @@
                                that can not be reduced will be left in Fp32.
     m_ReduceFp32ToFp16 (bool): Reduces Fp32 network to Fp16 for faster processing. Layers
                                that can not be reduced will be left in Fp32.
-    m_ImportEnabled (bool):    Enable memory import of inport tensors.
+    m_ImportEnabled (bool):    Enable memory import.
     m_shapeInferenceMethod:    The ShapeInferenceMethod modifies how the output shapes are treated.
                                When ValidateOnly is selected, the output shapes are inferred from the input parameters
                                of the layer and any mismatch is reported.
@@ -38,7 +38,6 @@
                                with tensors which rank or dimension sizes are not specified explicitly, however this
                                information can be calculated from the inputs.
     m_ModelOptions:            List of backends optimisation options.
-    m_ExportEnabled (bool):    Enable memory export of output tensors.
 
 ") OptimizerOptions;
 
@@ -52,8 +51,7 @@
                      bool reduceFp32ToBf16 = false,
                      ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
                      bool importEnabled = false,
-                     std::vector<armnn::BackendOptions> modelOptions = {},
-                     bool exportEnabled = false);
+                     std::vector<armnn::BackendOptions> modelOptions = {});
 
     bool m_ReduceFp32ToBf16;
     bool m_ReduceFp32ToFp16;
@@ -61,7 +59,6 @@
     ShapeInferenceMethod m_shapeInferenceMethod;
     bool m_ImportEnabled;
     std::vector<armnn::BackendOptions> m_ModelOptions;
-    bool m_ExportEnabled;
 };
 %model_options_clear;