IVGCVSW-4473 Android R pre-HAL 1.3 build changes

* Update ErrorStatus to V1_0::ErrorStatus (see the sketch below)
* Update Request to V1_0::Request
* Update OperandType to V1_2::OperandType
* Add a using-directive for namespace android::nn::hal in the ArmnnDriverImpl headers for R only
* Add missing g_RelaxedFloat32toFloat16PerformancePowerUsageName
* Add namespace V1_0 or V1_1 where necessary
* Update Android.mk with the ARMNN_ANDROID_R macro and android.hardware.neuralnetworks@1.3
* Remove androidnn.go
* Include IAllocator in DriverTestHelpers
* Remove unused LOCAL_CFLAGS
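
As an illustration of the qualification pattern applied throughout, a
minimal sketch (not part of the patch; assumes the usual HalInterfaces.h
includes so that Return and the generated HAL types are visible):

    // Alias the generated HIDL namespace, as the updated headers do.
    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    // Before: Return<ErrorStatus> prepareModel(...). On R the unqualified
    // names are no longer in scope, so every HAL type is fully qualified.
    Return<V1_0::ErrorStatus> prepareModel(
        const V1_0::Model& model,
        const android::sp<V1_0::IPreparedModelCallback>& cb) override;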

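RunTimePoolInfo also changed in R, where update() was removed and flush()
was added, so the call is guarded with the new ARMNN_ANDROID_R macro. A
sketch of the pattern used in ArmnnPreparedModel:

    for (android::nn::RunTimePoolInfo& pool : *pMemPools)
    {
    #if defined(ARMNN_ANDROID_R)
        pool.flush();   // R implementation: flush() replaces update()
    #else
        pool.update();  // P and Q implementation
    #endif
    }
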
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I1787f1ed6784b3bbec017536d87d49197405e853
diff --git a/1.0/ArmnnDriver.hpp b/1.0/ArmnnDriver.hpp
index 035d448..b18f065 100644
--- a/1.0/ArmnnDriver.hpp
+++ b/1.0/ArmnnDriver.hpp
@@ -46,8 +46,8 @@
         return armnn_driver::ArmnnDriverImpl<HalPolicy>::getSupportedOperations(m_Runtime, m_Options, model, cb);
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel(const V1_0::Model& model,
+                                           const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_0::ArmnnDriver::prepareModel()");
 
diff --git a/1.0/ArmnnDriverImpl.cpp b/1.0/ArmnnDriverImpl.cpp
index a35bb0e..57f828c 100644
--- a/1.0/ArmnnDriverImpl.cpp
+++ b/1.0/ArmnnDriverImpl.cpp
@@ -43,7 +43,7 @@
         capabilities.quantized8Performance.powerUsage =
             ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);
 
-        cb(ErrorStatus::NONE, capabilities);
+        cb(V1_0::ErrorStatus::NONE, capabilities);
     }
     else
     {
@@ -52,7 +52,7 @@
         capabilities.quantized8Performance.execTime   = 0;
         capabilities.quantized8Performance.powerUsage = 0;
 
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+        cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
     }
 
     return Void();
diff --git a/1.0/ArmnnDriverImpl.hpp b/1.0/ArmnnDriverImpl.hpp
index 7f033e0..bb93e2e 100644
--- a/1.0/ArmnnDriverImpl.hpp
+++ b/1.0/ArmnnDriverImpl.hpp
@@ -11,6 +11,10 @@
 
 #include <armnn/ArmNN.hpp>
 
+#ifdef ARMNN_ANDROID_R
+using namespace android::nn::hal;
+#endif
+
 namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
 
 namespace armnn_driver
diff --git a/1.1/ArmnnDriver.hpp b/1.1/ArmnnDriver.hpp
index baae635..a6849ab 100644
--- a/1.1/ArmnnDriver.hpp
+++ b/1.1/ArmnnDriver.hpp
@@ -33,6 +33,7 @@
     ~ArmnnDriver() {}
 
 public:
+
     Return<void> getCapabilities(V1_0::IDevice::getCapabilities_cb cb) override
     {
         ALOGV("hal_1_1::ArmnnDriver::getCapabilities()");
@@ -51,8 +52,8 @@
                                                                                          cb);
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel(const V1_0::Model& model,
+                                           const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_1::ArmnnDriver::prepareModel()");
 
@@ -81,9 +82,9 @@
                                                                                          cb);
     }
 
-    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
-                                         V1_1::ExecutionPreference preference,
-                                         const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                               V1_1::ExecutionPreference preference,
+                                               const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1()");
 
@@ -92,8 +93,8 @@
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
             ALOGV("hal_1_1::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
-            cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
-            return ErrorStatus::INVALID_ARGUMENT;
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
         }
 
         return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
diff --git a/1.1/ArmnnDriverImpl.cpp b/1.1/ArmnnDriverImpl.cpp
index d8939a0..1d1aaa7 100644
--- a/1.1/ArmnnDriverImpl.cpp
+++ b/1.1/ArmnnDriverImpl.cpp
@@ -11,11 +11,12 @@
 namespace
 {
 
-const char *g_Float32PerformanceExecTimeName             = "ArmNN.float32Performance.execTime";
-const char *g_Float32PerformancePowerUsageName           = "ArmNN.float32Performance.powerUsage";
-const char *g_Quantized8PerformanceExecTimeName          = "ArmNN.quantized8Performance.execTime";
-const char *g_Quantized8PerformancePowerUsageName        = "ArmNN.quantized8Performance.powerUsage";
-const char *g_RelaxedFloat32toFloat16PerformanceExecTime = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+const char *g_Float32PerformanceExecTimeName                   = "ArmNN.float32Performance.execTime";
+const char *g_Float32PerformancePowerUsageName                 = "ArmNN.float32Performance.powerUsage";
+const char *g_Quantized8PerformanceExecTimeName                = "ArmNN.quantized8Performance.execTime";
+const char *g_Quantized8PerformancePowerUsageName              = "ArmNN.quantized8Performance.powerUsage";
+const char *g_RelaxedFloat32toFloat16PerformanceExecTime       = "ArmNN.relaxedFloat32toFloat16Performance.execTime";
+const char *g_RelaxedFloat32toFloat16PerformancePowerUsageName = "ArmNN.relaxedFloat32toFloat16Performance.powerUsage";
 
 } // anonymous namespace
 
@@ -47,17 +48,21 @@
         capabilities.relaxedFloat32toFloat16Performance.execTime =
             ParseSystemProperty(g_RelaxedFloat32toFloat16PerformanceExecTime, .1f);
 
-        cb(ErrorStatus::NONE, capabilities);
+        capabilities.relaxedFloat32toFloat16Performance.powerUsage =
+            ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsageName, .1f);
+
+        cb(V1_0::ErrorStatus::NONE, capabilities);
     }
     else
     {
-        capabilities.float32Performance.execTime                 = 0;
-        capabilities.float32Performance.powerUsage               = 0;
-        capabilities.quantized8Performance.execTime              = 0;
-        capabilities.quantized8Performance.powerUsage            = 0;
-        capabilities.relaxedFloat32toFloat16Performance.execTime = 0;
+        capabilities.float32Performance.execTime                   = 0;
+        capabilities.float32Performance.powerUsage                 = 0;
+        capabilities.quantized8Performance.execTime                = 0;
+        capabilities.quantized8Performance.powerUsage              = 0;
+        capabilities.relaxedFloat32toFloat16Performance.execTime   = 0;
+        capabilities.relaxedFloat32toFloat16Performance.powerUsage = 0;
 
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+        cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
     }
 
     return Void();
diff --git a/1.1/ArmnnDriverImpl.hpp b/1.1/ArmnnDriverImpl.hpp
index 4308bac..f49dee0 100644
--- a/1.1/ArmnnDriverImpl.hpp
+++ b/1.1/ArmnnDriverImpl.hpp
@@ -11,6 +11,14 @@
 
 #include <armnn/ArmNN.hpp>
 
+#ifdef ARMNN_ANDROID_R
+using namespace android::nn::hal;
+#endif
+
+
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+
 namespace armnn_driver
 {
 namespace hal_1_1
diff --git a/1.1/HalPolicy.hpp b/1.1/HalPolicy.hpp
index dd8558b..806686b 100644
--- a/1.1/HalPolicy.hpp
+++ b/1.1/HalPolicy.hpp
@@ -9,6 +9,8 @@
 
 #include <HalInterfaces.h>
 
+namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
+
 namespace armnn_driver
 {
 namespace hal_1_1
diff --git a/1.2/ArmnnDriver.hpp b/1.2/ArmnnDriver.hpp
index 177cab6..6dba2e9 100644
--- a/1.2/ArmnnDriver.hpp
+++ b/1.2/ArmnnDriver.hpp
@@ -29,6 +29,7 @@
 class ArmnnDriver : public ArmnnDevice, public V1_2::IDevice
 {
 public:
+
     ArmnnDriver(DriverOptions options)
         : ArmnnDevice(std::move(options))
     {
@@ -57,8 +58,8 @@
                                                                                          cb);
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel(const V1_0::Model& model,
+                                           const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_2::ArmnnDriver::prepareModel()");
 
@@ -86,9 +87,9 @@
                                                                                          cb);
     }
 
-    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
-                                         V1_1::ExecutionPreference preference,
-                                         const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                               V1_1::ExecutionPreference preference,
+                                               const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1()");
 
@@ -97,8 +98,8 @@
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
             ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
-            cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
-            return ErrorStatus::INVALID_ARGUMENT;
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
         }
 
         return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
@@ -121,7 +122,7 @@
     {
         ALOGV("hal_1_2::ArmnnDriver::getVersionString()");
 
-        cb(ErrorStatus::NONE, "ArmNN");
+        cb(V1_0::ErrorStatus::NONE, "ArmNN");
         return Void();
     }
 
@@ -129,22 +130,22 @@
     {
         ALOGV("hal_1_2::ArmnnDriver::getType()");
 
-        cb(ErrorStatus::NONE, V1_2::DeviceType::CPU);
+        cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU);
         return Void();
     }
 
-    Return<ErrorStatus> prepareModelFromCache(
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
             const HidlToken&,
             const sp<V1_2::IPreparedModelCallback>& callback)
     {
         ALOGV("hal_1_2::ArmnnDriver::prepareModelFromCache()");
-        callback->notify_1_2(ErrorStatus::GENERAL_FAILURE, nullptr);
-        return ErrorStatus::GENERAL_FAILURE;
+        callback->notify_1_2(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr);
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
 
-    Return<ErrorStatus> prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference,
+    Return<V1_0::ErrorStatus> prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference,
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&, const HidlToken&,
             const android::sp<V1_2::IPreparedModelCallback>& cb)
@@ -156,8 +157,8 @@
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
             ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_2: Invalid execution preference");
-            cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
-            return ErrorStatus::INVALID_ARGUMENT;
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
         }
 
         return ArmnnDriverImpl::prepareArmnnModel_1_2(m_Runtime,
@@ -172,7 +173,7 @@
     Return<void> getSupportedExtensions(getSupportedExtensions_cb cb)
     {
         ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()");
-        cb(ErrorStatus::NONE, {/* No extensions. */});
+        cb(V1_0::ErrorStatus::NONE, {/* No extensions. */});
         return Void();
     }
 
@@ -199,7 +200,7 @@
         ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()");
 
         // Set both numbers to be 0 for cache not supported.
-        cb(ErrorStatus::NONE, 0, 0);
+        cb(V1_0::ErrorStatus::NONE, 0, 0);
         return Void();
     }
 };
diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index 691156f..bfa730b 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -57,7 +57,7 @@
 
 
 void NotifyCallbackAndCheck(const sp<V1_2::IPreparedModelCallback>& callback,
-                            ErrorStatus errorStatus,
+                            V1_0::ErrorStatus errorStatus,
                             const sp<V1_2::IPreparedModel>& preparedModelPtr)
 {
     Return<void> returned = callback->notify_1_2(errorStatus, preparedModelPtr);
@@ -69,9 +69,9 @@
     }
 }
 
-Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
-                                     const std::string& message,
-                                     const sp<V1_2::IPreparedModelCallback>& callback)
+Return<V1_0::ErrorStatus> FailPrepareModel(V1_0::ErrorStatus error,
+                                           const std::string& message,
+                                           const sp<V1_2::IPreparedModelCallback>& callback)
 {
     ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
     NotifyCallbackAndCheck(callback, error, nullptr);
@@ -85,29 +85,30 @@
 namespace hal_1_2
 {
 
-Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
-                                                           const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
-                                                           const DriverOptions& options,
-                                                           const V1_2::Model& model,
-                                                           const sp<V1_2::IPreparedModelCallback>& cb,
-                                                           bool float32ToFloat16)
+Return<V1_0::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(
+       const armnn::IRuntimePtr& runtime,
+       const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+       const DriverOptions& options,
+       const V1_2::Model& model,
+       const sp<V1_2::IPreparedModelCallback>& cb,
+       bool float32ToFloat16)
 {
     ALOGV("ArmnnDriverImpl::prepareArmnnModel_1_2()");
 
     if (cb.get() == nullptr)
     {
         ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
-        return ErrorStatus::INVALID_ARGUMENT;
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
     if (!runtime)
     {
-        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
     }
 
     if (!android::nn::validateModel(model))
     {
-        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
     }
 
     // Deliberately ignore any unsupported operations requested by the options -
@@ -120,8 +121,8 @@
 
     if (modelConverter.GetConversionResult() != ConversionResult::Success)
     {
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Optimize the network
@@ -142,8 +143,8 @@
     {
         std::stringstream message;
         message << "Exception (" << e.what() << ") caught from optimize.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Check that the optimized network is valid.
@@ -155,8 +156,8 @@
         {
             message << "\n" << msg;
         }
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Export the optimized network graph to a dot file if an output dump directory
@@ -170,15 +171,15 @@
     {
         if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
         {
-            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
+            return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
         }
     }
     catch (std::exception& e)
     {
         std::stringstream message;
         message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Now that we have a networkId for the graph rename the dump file to use it
@@ -199,7 +200,7 @@
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
     if (!preparedModel->ExecuteWithDummyInputs())
     {
-        return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
     }
 
     if (clTunedParameters &&
@@ -217,9 +218,9 @@
         }
     }
 
-    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());
+    NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel.release());
 
-    return ErrorStatus::NONE;
+    return V1_0::ErrorStatus::NONE;
 }
 
 Return<void> ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runtime,
@@ -240,52 +241,56 @@
                 ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsage, defaultValue);
 
         // Set the base value for all operand types
+        #ifdef ARMNN_ANDROID_R
+        capabilities.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_2>({FLT_MAX, FLT_MAX});
+        #else
         capabilities.operandPerformance = nonExtensionOperandPerformance({FLT_MAX, FLT_MAX});
+        #endif
 
         // Load supported operand types
-        update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorFloat32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat32PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::FLOAT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::FLOAT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeFloat32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeFloat32PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT16,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT16,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorFloat16PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat16PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::FLOAT16,
+        update(&capabilities.operandPerformance, V1_2::OperandType::FLOAT16,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeFloat16PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeFloat16PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_SYMM,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_SYMM,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT16_SYMM,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT16_SYMM,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
                {
                    .execTime =
                    ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformanceExecTime, defaultValue),
@@ -293,19 +298,19 @@
                    ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformancePowerUsage, defaultValue)
                });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_INT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_INT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorInt32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorInt32PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::INT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::INT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeInt32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeInt32PerformancePowerUsage, defaultValue)
                 });
 
-        cb(ErrorStatus::NONE, capabilities);
+        cb(V1_0::ErrorStatus::NONE, capabilities);
     }
     else
     {
@@ -313,13 +318,17 @@
         capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime = 0;
 
         // Set the base value for all operand types
+        #ifdef ARMNN_ANDROID_R
+        capabilities.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_2>({0.f, 0.0f});
+        #else
         capabilities.operandPerformance = nonExtensionOperandPerformance({0.f, 0.0f});
+        #endif
 
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+        cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
     }
 
     return Void();
 }
 
 } // namespace hal_1_2
-} // namespace armnn_driver
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.2/ArmnnDriverImpl.hpp b/1.2/ArmnnDriverImpl.hpp
index b3c6507..73ba133 100644
--- a/1.2/ArmnnDriverImpl.hpp
+++ b/1.2/ArmnnDriverImpl.hpp
@@ -11,6 +11,13 @@
 
 #include <armnn/ArmNN.hpp>
 
+#ifdef ARMNN_ANDROID_R
+using namespace android::nn::hal;
+#endif
+
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+
 namespace armnn_driver
 {
 namespace hal_1_2
@@ -19,12 +26,12 @@
 class ArmnnDriverImpl
 {
 public:
-    static Return<ErrorStatus> prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
-                                                     const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
-                                                     const DriverOptions& options,
-                                                     const V1_2::Model& model,
-                                                     const android::sp<V1_2::IPreparedModelCallback>& cb,
-                                                     bool float32ToFloat16 = false);
+    static Return<V1_0::ErrorStatus> prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
+                                                           const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+                                                           const DriverOptions& options,
+                                                           const V1_2::Model& model,
+                                                           const android::sp<V1_2::IPreparedModelCallback>& cb,
+                                                           bool float32ToFloat16 = false);
 
     static Return<void> getCapabilities_1_2(const armnn::IRuntimePtr& runtime,
                                             V1_2::IDevice::getCapabilities_1_2_cb cb);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8e4ef8a..b3ccc47 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -26,9 +26,9 @@
 namespace
 {
 
-bool IsQSymmDequantizeForWeights(const Operation& operation, const Model& model)
+bool IsQSymmDequantizeForWeights(const HalPolicy::Operation& operation, const HalPolicy::Model& model)
 {
-    const Operand* operand = GetInputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    const HalPolicy::Operand* operand = GetInputOperand<hal_1_2::HalPolicy>(operation, 0, model);
     if (!operand)
     {
         return false;
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index e0a5c2f..cd4f2da 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -11,6 +11,8 @@
 
 #include <armnn/Types.hpp>
 
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+
 namespace armnn_driver
 {
 namespace hal_1_2
diff --git a/Android.bp b/Android.bp
index 7632c8c..a2c8053 100644
--- a/Android.bp
+++ b/Android.bp
@@ -3,23 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
 
-bootstrap_go_package {
-    name: "armnn_nn_driver",
-    pkgPath: "android-nn-driver",
-    deps: [
-        "blueprint",
-        "blueprint-pathtools",
-        "blueprint-proptools",
-        "soong",
-        "soong-android",
-        "soong-cc",
-    ],
-    srcs: [
-        "androidnn.go",
-    ],
-    pluginFor: [ "soong_build" ],
-}
-
 ////////////////////////////////////////////
 //                                        //
 //           static boost libs            //
diff --git a/Android.mk b/Android.mk
index 73ae8ca..6cc85ee 100644
--- a/Android.mk
+++ b/Android.mk
@@ -8,6 +8,7 @@
 
 P_OR_LATER := 0
 Q_OR_LATER := 0
+R_OR_LATER := 0
 
 ifeq ($(PLATFORM_VERSION),9)
 P_OR_LATER := 1
@@ -25,6 +26,12 @@
 Q_OR_LATER := 1
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(PLATFORM_VERSION),R)
+P_OR_LATER := 1
+Q_OR_LATER := 1
+R_OR_LATER := 1
+endif # PLATFORM_VERSION == R
+
 CPP_VERSION := c++14
 
 ifeq ($(Q_OR_LATER),1)
@@ -55,6 +62,10 @@
 ARMNN_REF_ENABLED := 0
 endif
 
+ifeq ($(PLATFORM_VERSION),R)
+ARMNN_COMPUTE_CL_ENABLED := 0
+endif # PLATFORM_VERSION == R
+
 #######################
 # libarmnn-driver@1.0 #
 #######################
@@ -84,12 +95,11 @@
         -Werror \
         -Wno-format-security
 
-ifeq ($(P_OR_LATER),1)
-# Required to build with the changes made to the Android ML framework starting from Android P,
-# regardless of the HAL version used for the build.
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(PLATFORM_VERSION),R)
 LOCAL_CFLAGS+= \
-        -DARMNN_ANDROID_P
-endif # PLATFORM_VERSION == 9
+        -DARMNN_ANDROID_R
+endif # PLATFORM_VERSION == R
 
 ifeq ($(ARMNN_DRIVER_DEBUG),1)
 LOCAL_CFLAGS+= \
@@ -164,7 +174,12 @@
         libfmq \
         libcutils \
         android.hardware.neuralnetworks@1.2
-endif # PLATFORM_VERSION == Q
+endif # Q or later
+
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
 
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
@@ -205,7 +220,6 @@
         -fexceptions \
         -Werror \
         -Wno-format-security \
-        -DARMNN_ANDROID_P \
         -DARMNN_ANDROID_NN_V1_1
 
 ifeq ($(ARMNN_DRIVER_DEBUG),1)
@@ -218,6 +232,12 @@
         -DBOOST_NO_AUTO_PTR
 endif # PLATFORM_VERSION == Q or later
 
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(PLATFORM_VERSION),R)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # PLATFORM_VERSION == R
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_CFLAGS += \
         -DARMCOMPUTECL_ENABLED
@@ -279,6 +299,11 @@
         android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -314,7 +339,7 @@
         -fexceptions \
         -Werror \
         -Wno-format-security \
-        -DARMNN_ANDROID_Q \
+        -DBOOST_NO_AUTO_PTR \
         -DARMNN_ANDROID_NN_V1_2
 
 ifeq ($(ARMNN_DRIVER_DEBUG),1)
@@ -322,10 +347,11 @@
         -UNDEBUG
 endif # ARMNN_DRIVER_DEBUG == 1
 
-ifeq ($(Q_OR_LATER),1)
-LOCAL_CFLAGS += \
-        -DBOOST_NO_AUTO_PTR
-endif # PLATFORM_VERSION == Q or later
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(PLATFORM_VERSION),R)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # PLATFORM_VERSION == R
 
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_CFLAGS += \
@@ -387,6 +413,11 @@
         android.hardware.neuralnetworks@1.1 \
         android.hardware.neuralnetworks@1.2
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -433,6 +464,12 @@
         -DBOOST_NO_AUTO_PTR
 endif # PLATFORM_VERSION == Q or later
 
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(PLATFORM_VERSION),R)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # PLATFORM_VERSION == R
+
 LOCAL_SRC_FILES := \
         service.cpp
 
@@ -468,6 +505,7 @@
 LOCAL_SHARED_LIBRARIES+= \
         android.hardware.neuralnetworks@1.1
 endif # PLATFORM_VERSION == 9
+
 ifeq ($(Q_OR_LATER),1)
 LOCAL_SHARED_LIBRARIES+= \
         libnativewindow \
@@ -477,6 +515,11 @@
         android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -525,6 +568,12 @@
         -DBOOST_NO_AUTO_PTR
 endif # PLATFORM_VERSION == Q or later
 
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(PLATFORM_VERSION),R)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # PLATFORM_VERSION == R
+
 LOCAL_SRC_FILES := \
         service.cpp
 
@@ -564,6 +613,11 @@
         android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -605,6 +659,12 @@
         -UNDEBUG
 endif # ARMNN_DRIVER_DEBUG == 1
 
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(PLATFORM_VERSION),R)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # PLATFORM_VERSION == R
+
 LOCAL_SRC_FILES := \
         service.cpp
 
@@ -640,6 +700,11 @@
         android.hardware.neuralnetworks@1.1 \
         android.hardware.neuralnetworks@1.2
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
diff --git a/ArmnnDriverImpl.cpp b/ArmnnDriverImpl.cpp
index 14af3c0..eab9598 100644
--- a/ArmnnDriverImpl.cpp
+++ b/ArmnnDriverImpl.cpp
@@ -26,7 +26,7 @@
 {
 
 void NotifyCallbackAndCheck(const sp<V1_0::IPreparedModelCallback>& callback,
-                            ErrorStatus errorStatus,
+                            V1_0::ErrorStatus errorStatus,
                             const sp<V1_0::IPreparedModel>& preparedModelPtr)
 {
     Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
@@ -38,9 +38,9 @@
     }
 }
 
-Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
-                                     const string& message,
-                                     const sp<V1_0::IPreparedModelCallback>& callback)
+Return<V1_0::ErrorStatus> FailPrepareModel(V1_0::ErrorStatus error,
+                                           const string& message,
+                                           const sp<V1_0::IPreparedModelCallback>& callback)
 {
     ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
     NotifyCallbackAndCheck(callback, error, nullptr);
@@ -54,7 +54,7 @@
 {
 
 template<typename HalPolicy>
-Return<ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
+Return<V1_0::ErrorStatus> ArmnnDriverImpl<HalPolicy>::prepareModel(
         const armnn::IRuntimePtr& runtime,
         const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
         const DriverOptions& options,
@@ -67,17 +67,17 @@
     if (cb.get() == nullptr)
     {
         ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
-        return ErrorStatus::INVALID_ARGUMENT;
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
     if (!runtime)
     {
-        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
     }
 
     if (!android::nn::validateModel(model))
     {
-        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
     }
 
     // Deliberately ignore any unsupported operations requested by the options -
@@ -90,8 +90,8 @@
 
     if (modelConverter.GetConversionResult() != ConversionResult::Success)
     {
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Optimize the network
@@ -112,8 +112,8 @@
     {
         stringstream message;
         message << "Exception (" << e.what() << ") caught from optimize.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Check that the optimized network is valid.
@@ -125,8 +125,8 @@
         {
             message << "\n" << msg;
         }
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Export the optimized network graph to a dot file if an output dump directory
@@ -139,15 +139,15 @@
     {
         if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
         {
-            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
+            return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
         }
     }
     catch (std::exception& e)
     {
         stringstream message;
         message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Now that we have a networkId for the graph rename the dump file to use it
@@ -168,7 +168,7 @@
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
     if (!preparedModel->ExecuteWithDummyInputs())
     {
-        return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
     }
 
     if (clTunedParameters &&
@@ -186,9 +186,9 @@
         }
     }
 
-    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel);
+    NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel);
 
-    return ErrorStatus::NONE;
+    return V1_0::ErrorStatus::NONE;
 }
 
 template<typename HalPolicy>
@@ -227,14 +227,14 @@
 
     if (!runtime)
     {
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, result);
+        cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, result);
         return Void();
     }
 
     // Run general model validation, if this doesn't pass we shouldn't analyse the model anyway.
     if (!android::nn::validateModel(model))
     {
-        cb(ErrorStatus::INVALID_ARGUMENT, result);
+        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, result);
         return Void();
     }
 
@@ -246,7 +246,7 @@
     if (modelConverter.GetConversionResult() != ConversionResult::Success
             && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
     {
-        cb(ErrorStatus::GENERAL_FAILURE, result);
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, result);
         return Void();
     }
 
@@ -259,7 +259,7 @@
         result.push_back(operationSupported);
     }
 
-    cb(ErrorStatus::NONE, result);
+    cb(V1_0::ErrorStatus::NONE, result);
     return Void();
 }
 
diff --git a/ArmnnDriverImpl.hpp b/ArmnnDriverImpl.hpp
index 49f0975..c5b1778 100644
--- a/ArmnnDriverImpl.hpp
+++ b/ArmnnDriverImpl.hpp
@@ -9,6 +9,10 @@
 
 #include <HalInterfaces.h>
 
+#ifdef ARMNN_ANDROID_R
+using namespace android::nn::hal;
+#endif
+
 namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
 namespace V1_1 = ::android::hardware::neuralnetworks::V1_1;
 
@@ -32,7 +36,7 @@
             const HalModel& model,
             HalGetSupportedOperations_cb);
 
-    static Return<ErrorStatus> prepareModel(
+    static Return<V1_0::ErrorStatus> prepareModel(
             const armnn::IRuntimePtr& runtime,
             const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
             const DriverOptions& options,
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 0899430..2cd560d 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -11,12 +11,8 @@
 #include <boost/format.hpp>
 #include <log/log.h>
 #include <OperationsUtils.h>
-
-#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q)
-// The headers of the ML framework have changed between Android O and Android P.
-// The validation functions have been moved into their own header, ValidateHal.h.
 #include <ValidateHal.h>
-#endif
+
 
 #include <cassert>
 #include <cinttypes>
@@ -27,7 +23,7 @@
 {
 using namespace armnn_driver;
 
-void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback, ErrorStatus errorStatus,
+void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback, V1_0::ErrorStatus errorStatus,
                             std::string callingFunction)
 {
     Return<void> returned = callback->notify(errorStatus);
@@ -139,21 +135,22 @@
 }
 
 template<typename HalVersion>
-Return<ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(const Request& request,
-                                                            const ::android::sp<V1_0::IExecutionCallback>& callback)
+Return<V1_0::ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(
+    const V1_0::Request& request,
+    const ::android::sp<V1_0::IExecutionCallback>& callback)
 {
     ALOGV("ArmnnPreparedModel::execute(): %s", GetModelSummary(m_Model).c_str());
     m_RequestCount++;
 
     if (callback.get() == nullptr) {
         ALOGE("ArmnnPreparedModel::execute invalid callback passed");
-        return ErrorStatus::INVALID_ARGUMENT;
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
     if (!android::nn::validateRequest(request, m_Model))
     {
-        NotifyCallbackAndCheck(callback, ErrorStatus::INVALID_ARGUMENT, "ArmnnPreparedModel::execute");
-        return ErrorStatus::INVALID_ARGUMENT;
+        NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::INVALID_ARGUMENT, "ArmnnPreparedModel::execute");
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
     if (!m_RequestInputsAndOutputsDumpDir.empty())
@@ -170,8 +167,8 @@
     auto pMemPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();
     if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
     {
-        NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
-        return ErrorStatus::GENERAL_FAILURE;
+        NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
 
     // add the inputs and outputs with their data
@@ -187,7 +184,7 @@
             if (inputTensor.GetMemoryArea() == nullptr)
             {
                 ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
-                return ErrorStatus::GENERAL_FAILURE;
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
             }
 
             pInputTensors->emplace_back(i, inputTensor);
@@ -203,7 +200,7 @@
             if (outputTensor.GetMemoryArea() == nullptr)
             {
                 ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
-                return ErrorStatus::GENERAL_FAILURE;
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
             }
 
             pOutputTensors->emplace_back(i, outputTensor);
@@ -212,19 +209,19 @@
     catch (armnn::Exception& e)
     {
         ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
-        NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
-        return ErrorStatus::GENERAL_FAILURE;
+        NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
     catch (std::exception& e)
     {
         ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
-        NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
-        return ErrorStatus::GENERAL_FAILURE;
+        NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
 
     ALOGV("ArmnnPreparedModel::execute(...) before PostMsg");
 
-    auto cb = [callback](ErrorStatus errorStatus, std::string callingFunction)
+    auto cb = [callback](V1_0::ErrorStatus errorStatus, std::string callingFunction)
     {
         NotifyCallbackAndCheck(callback, errorStatus, callingFunction);
     };
@@ -234,7 +231,7 @@
     // post the request for asynchronous execution
     m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, armnnCb);
     ALOGV("ArmnnPreparedModel::execute(...) after PostMsg");
-    return ErrorStatus::NONE; // successfully queued
+    return V1_0::ErrorStatus::NONE; // successfully queued
 }
 
 template<typename HalVersion>
@@ -255,20 +252,20 @@
         if (status != armnn::Status::Success)
         {
             ALOGW("EnqueueWorkload failed");
-            cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
+            cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
             return;
         }
     }
     catch (armnn::Exception& e)
     {
         ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
-        cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
+        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
         return;
     }
     catch (std::exception& e)
     {
         ALOGE("std::exception caught from EnqueueWorkload: %s", e.what());
-        cb.callback(ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
+        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::ExecuteGraph");
         return;
     }
 
@@ -279,10 +276,16 @@
     // this is simpler and is what the CpuExecutor does.
     for (android::nn::RunTimePoolInfo& pool : *pMemPools)
     {
-        pool.update();
+        // The android::nn::RunTimePoolInfo API changed between Android P/Q and Android R:
+        // update() was removed and flush() was added.
+        #if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
+            pool.flush();
+        #else
+            pool.update();
+        #endif
     }
 
-    cb.callback(ErrorStatus::NONE, "ExecuteGraph");
+    cb.callback(V1_0::ErrorStatus::NONE, "ExecuteGraph");
 }
 
 template<typename HalVersion>
diff --git a/ArmnnPreparedModel.hpp b/ArmnnPreparedModel.hpp
index 33be972..270a933 100644
--- a/ArmnnPreparedModel.hpp
+++ b/ArmnnPreparedModel.hpp
@@ -38,8 +38,8 @@
 
     virtual ~ArmnnPreparedModel();
 
-    virtual Return<ErrorStatus> execute(const Request& request,
-                                        const ::android::sp<V1_0::IExecutionCallback>& callback) override;
+    virtual Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                              const ::android::sp<V1_0::IExecutionCallback>& callback) override;
 
     /// execute the graph prepared from the request
     void ExecuteGraph(std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index 84ff6e2..9b79044 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -41,7 +41,7 @@
 }
 
 void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
-                            ErrorStatus errorStatus,
+                            V1_0::ErrorStatus errorStatus,
                             std::vector<OutputShape>,
                             const Timing,
                             std::string callingFunction)
@@ -56,7 +56,7 @@
 }
 
 void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
-                            ErrorStatus errorStatus,
+                            V1_0::ErrorStatus errorStatus,
                             std::vector<OutputShape> outputShapes,
                             const Timing timing,
                             std::string callingFunction)
@@ -172,16 +172,16 @@
 }
 
 template<typename HalVersion>
-Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(const Request& request,
+Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(const V1_0::Request& request,
         const ::android::sp<V1_0::IExecutionCallback>& callback)
 {
     if (callback.get() == nullptr)
     {
         ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed");
-        return ErrorStatus::INVALID_ARGUMENT;
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
-    auto cb = [callback](ErrorStatus errorStatus,
+    auto cb = [callback](V1_0::ErrorStatus errorStatus,
                          std::vector<OutputShape> outputShapes,
                          const Timing& timing,
                          std::string callingFunction)
@@ -193,17 +193,18 @@
 }
 
 template<typename HalVersion>
-Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(const Request& request,
-                                                                     MeasureTiming measureTiming,
-                                                                     const sp<V1_2::IExecutionCallback>& callback)
+Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(
+        const V1_0::Request& request,
+        MeasureTiming measureTiming,
+        const sp<V1_2::IExecutionCallback>& callback)
 {
     if (callback.get() == nullptr)
     {
         ALOGE("ArmnnPreparedModel_1_2::execute_1_2 invalid callback passed");
-        return ErrorStatus::INVALID_ARGUMENT;
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
-    auto cb = [callback](ErrorStatus errorStatus,
+    auto cb = [callback](V1_0::ErrorStatus errorStatus,
                          std::vector<OutputShape> outputShapes,
                          const Timing& timing,
                          std::string callingFunction)
@@ -215,7 +216,7 @@
 }
 
 template<typename HalVersion>
-Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const Request& request,
+Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const V1_0::Request& request,
                                                                       MeasureTiming measureTiming,
                                                                       executeSynchronously_cb cb)
 {
@@ -238,7 +239,7 @@
     if (!android::nn::validateRequest(request, m_Model))
     {
         ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid request model");
-        cb(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
+        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
         return Void();
     }
 
@@ -252,7 +253,7 @@
 
     if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
     {
-        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
         return Void();
     }
     std::vector<OutputShape> outputShapes(request.outputs.size());
@@ -270,7 +271,7 @@
             if (inputTensor.GetMemoryArea() == nullptr)
             {
                 ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
-                cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+                cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
                 return Void();
             }
 
@@ -288,7 +289,7 @@
             if (outputTensor.GetMemoryArea() == nullptr)
             {
                 ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
-                cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+                cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
                 return Void();
             }
             const size_t outputSize = outputTensorInfo.GetNumBytes();
@@ -310,7 +311,7 @@
             if (bufferSize < outputSize)
             {
                 ALOGW("ArmnnPreparedModel_1_2::Execute failed");
-                cb(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, outputShapes, g_NoTiming);
+                cb(V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, outputShapes, g_NoTiming);
                 return Void();
             }
 
@@ -320,13 +321,13 @@
     catch (armnn::Exception& e)
     {
         ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
-        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
         return Void();
     }
     catch (std::exception& e)
     {
         ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
-        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
         return Void();
     }
 
@@ -351,20 +352,20 @@
         if (status != armnn::Status::Success)
         {
             ALOGW("EnqueueWorkload failed");
-            cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+            cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
             return Void();
         }
     }
     catch (armnn::Exception& e)
     {
         ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
-        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
         return Void();
     }
     catch (std::exception& e)
     {
         ALOGE("std::exception caught from EnqueueWorkload: %s", e.what());
-        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
         return Void();
     }
 
@@ -375,8 +376,15 @@
     // this is simpler and is what the CpuExecutor does.
     for (android::nn::RunTimePoolInfo& pool : *pMemPools)
     {
-        pool.update();
+        // The android::nn::RunTimePoolInfo API changed between Android P/Q and Android R:
+        // update() was removed and flush() was added.
+        #if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
+            pool.flush();
+        #else
+            pool.update();
+        #endif
     }
+
     ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() after Execution");
 
     if (measureTiming == MeasureTiming::YES)
@@ -387,11 +395,11 @@
         timing.timeInDriver = MicrosecondsDuration(driverEnd, driverStart);
         ALOGV("ArmnnPreparedModel_1_2::executeSynchronously timing Device = %lu Driver = %lu", timing.timeOnDevice,
                 timing.timeInDriver);
-        cb(ErrorStatus::NONE, outputShapes, timing);
+        cb(V1_0::ErrorStatus::NONE, outputShapes, timing);
     }
     else
     {
-        cb(ErrorStatus::NONE, outputShapes, g_NoTiming);
+        cb(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming);
     }
     return Void();
 }
@@ -402,7 +410,7 @@
 ///         ml/+/refs/tags/android-10.0.0_r20/nn/common/ExecutionBurstServer.cpp
 class ArmnnBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache {
 public:
-    ArmnnBurstExecutorWithCache(IPreparedModel* preparedModel)
+    ArmnnBurstExecutorWithCache(V1_2::IPreparedModel* preparedModel)
         : m_PreparedModel(preparedModel)
     {}
 
@@ -422,8 +430,8 @@
         m_MemoryCache.erase(slot);
     }
 
-    std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
-            const Request& request, const std::vector<int32_t>& slots,
+    std::tuple<V1_0::ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
+            const V1_0::Request& request, const std::vector<int32_t>& slots,
             MeasureTiming measure) override
     {
         ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache::execute");
@@ -434,14 +442,14 @@
             return m_MemoryCache[slot];
         });
 
-        Request fullRequest = request;
+        V1_0::Request fullRequest = request;
         fullRequest.pools = std::move(pools);
 
         // Setup Callback
-        ErrorStatus returnedStatus = ErrorStatus::GENERAL_FAILURE;
+        V1_0::ErrorStatus returnedStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
         hidl_vec<OutputShape> returnedOutputShapes;
         Timing returnedTiming;
-        auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](ErrorStatus status,
+        auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](V1_0::ErrorStatus status,
                                                                             const hidl_vec<OutputShape>& outputShapes,
                                                                             const Timing& timing)
         {
@@ -454,7 +462,7 @@
         ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache executing");
         const Return<void> ret = m_PreparedModel->executeSynchronously(fullRequest, measure, cb);
 
-        if (!ret.isOk() || returnedStatus != ErrorStatus::NONE)
+        if (!ret.isOk() || returnedStatus != V1_0::ErrorStatus::NONE)
         {
             ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::error executing");
         }
@@ -462,7 +470,7 @@
     }
 
 private:
-    IPreparedModel* const m_PreparedModel;
+    V1_2::IPreparedModel* const m_PreparedModel;
     std::map<int, hidl_memory> m_MemoryCache;
 };
 
@@ -484,11 +492,11 @@
 
     if (burst == nullptr)
     {
-        cb(ErrorStatus::GENERAL_FAILURE, {});
+        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
     }
     else
     {
-        cb(ErrorStatus::NONE, burst);
+        cb(V1_0::ErrorStatus::NONE, burst);
     }
     return Void();
 }
@@ -546,7 +554,7 @@
         if (status != armnn::Status::Success)
         {
             ALOGW("EnqueueWorkload failed");
-            cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
+            cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
                     "ArmnnPreparedModel_1_2::ExecuteGraph");
             return;
         }
@@ -554,13 +562,13 @@
     catch (armnn::Exception& e)
     {
         ALOGW("armnn:Exception caught from EnqueueWorkload: %s", e.what());
-        cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
+        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
         return;
     }
     catch (std::exception& e)
     {
         ALOGE("std::exception caught from EnqueueWorkload: %s", e.what());
-        cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
+        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
         return;
     }
 
@@ -571,7 +579,13 @@
     // this is simpler and is what the CpuExecutor does.
     for (android::nn::RunTimePoolInfo& pool : *pMemPools)
     {
-        pool.update();
+        // Type android::nn::RunTimePoolInfo has changed from Android P & Q to Android R:
+        // update() has been removed and flush() has been added.
+        #if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
+            pool.flush();
+        #else
+            pool.update();
+        #endif
     }
 
     if (cb.measureTiming == MeasureTiming::YES)
@@ -580,9 +594,9 @@
         Timing timing;
         timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
         timing.timeInDriver = MicrosecondsDuration(driverEnd, cb.driverStart);
-        cb.callback(ErrorStatus::NONE, outputShapes, timing, "ExecuteGraph");
+        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, timing, "ExecuteGraph");
     } else {
-        cb.callback(ErrorStatus::NONE, outputShapes, g_NoTiming, "ExecuteGraph");
+        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming, "ExecuteGraph");
     }
 }
 
@@ -633,9 +647,9 @@
 }
 
 template<typename HalVersion>
-Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const Request& request,
-                                                                 MeasureTiming measureTiming,
-                                                                 armnnExecuteCallback_1_2 callback)
+Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const V1_0::Request& request,
+                                                                       MeasureTiming measureTiming,
+                                                                       armnnExecuteCallback_1_2 callback)
 {
     TimePoint driverStart;
 
@@ -649,8 +663,8 @@
 
     if (!android::nn::validateRequest(request, m_Model))
     {
-        callback(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
-        return ErrorStatus::INVALID_ARGUMENT;
+        callback(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
     if (!m_RequestInputsAndOutputsDumpDir.empty())
@@ -668,8 +682,8 @@
 
     if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
     {
-        callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
-        return ErrorStatus::GENERAL_FAILURE;
+        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
 
     // add the inputs and outputs with their data
@@ -686,8 +700,8 @@
             if (inputTensor.GetMemoryArea() == nullptr)
             {
                 ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
-                callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
-                return ErrorStatus::GENERAL_FAILURE;
+                callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
             }
 
             pInputTensors->emplace_back(i, inputTensor);
@@ -705,8 +719,8 @@
             if (outputTensor.GetMemoryArea() == nullptr)
             {
                 ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
-                callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
-                return ErrorStatus::GENERAL_FAILURE;
+                callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
+                return V1_0::ErrorStatus::GENERAL_FAILURE;
             }
 
             const size_t outputSize = outputTensorInfo.GetNumBytes();
@@ -729,25 +743,25 @@
             if (bufferSize < outputSize)
             {
                 ALOGW("ArmnnPreparedModel_1_2::Execute failed");
-                callback(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
+                callback(V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
                          outputShapes,
                          g_NoTiming,
                          "ArmnnPreparedModel_1_2::Execute");
-                return ErrorStatus::NONE;
+                return V1_0::ErrorStatus::NONE;
             }
         }
     }
     catch (armnn::Exception& e)
     {
         ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
-        callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
-        return ErrorStatus::GENERAL_FAILURE;
+        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
     catch (std::exception& e)
     {
         ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
-        callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
-        return ErrorStatus::GENERAL_FAILURE;
+        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
 
     ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg");
@@ -758,7 +772,7 @@
     armnnCb.driverStart = driverStart;
     m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, armnnCb);
     ALOGV("ArmnnPreparedModel_1_2::execute(...) after PostMsg");
-    return ErrorStatus::NONE;
+    return V1_0::ErrorStatus::NONE;
 }
 
 #ifdef ARMNN_ANDROID_NN_V1_2
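
The update()/flush() guard above is applied in both executeSynchronously() and ExecuteGraph(). As a minimal sketch (not part of the patch; CommitPools is a hypothetical name), the version split could be kept in one helper:

    // Commits driver-written outputs back to the request's memory pools.
    // RunTimePoolInfo::update() was removed in Android R and replaced by flush().
    inline void CommitPools(std::vector<android::nn::RunTimePoolInfo>& memPools)
    {
        for (android::nn::RunTimePoolInfo& pool : memPools)
        {
    #if defined(ARMNN_ANDROID_R)
            pool.flush();
    #else
            pool.update();
    #endif
        }
    }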
diff --git a/ArmnnPreparedModel_1_2.hpp b/ArmnnPreparedModel_1_2.hpp
index b97895e..f609ef7 100644
--- a/ArmnnPreparedModel_1_2.hpp
+++ b/ArmnnPreparedModel_1_2.hpp
@@ -45,13 +45,13 @@
 
     virtual ~ArmnnPreparedModel_1_2();
 
-    virtual Return<ErrorStatus> execute(const Request& request,
-                                        const sp<V1_0::IExecutionCallback>& callback) override;
+    virtual Return<V1_0::ErrorStatus> execute(const V1_0::Request& request,
+                                              const sp<V1_0::IExecutionCallback>& callback) override;
 
-    virtual Return<ErrorStatus> execute_1_2(const Request& request, MeasureTiming measure,
-                                            const sp<V1_2::IExecutionCallback>& callback) override;
+    virtual Return<V1_0::ErrorStatus> execute_1_2(const V1_0::Request& request, MeasureTiming measure,
+                                                  const sp<V1_2::IExecutionCallback>& callback) override;
 
-    virtual Return<void> executeSynchronously(const Request &request,
+    virtual Return<void> executeSynchronously(const V1_0::Request &request,
                                               MeasureTiming measure,
                                               V1_2::IPreparedModel::executeSynchronously_cb cb) override;
 
@@ -72,9 +72,9 @@
     bool ExecuteWithDummyInputs();
 
 private:
-    Return <ErrorStatus> Execute(const Request& request,
-                                 MeasureTiming measureTiming,
-                                 armnnExecuteCallback_1_2 callback);
+    Return <V1_0::ErrorStatus> Execute(const V1_0::Request& request,
+                                       MeasureTiming measureTiming,
+                                       armnnExecuteCallback_1_2 callback);
 
     template <typename TensorBindingCollection>
     void DumpTensorsIfRequired(char const* tensorNamePrefix, const TensorBindingCollection& tensorBindings);
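
For reference, a hedged sketch of the synchronous call site these signatures serve; in HAL 1.2 the executeSynchronously_cb already carries the V1_0 error code, so a caller looks the same before and after this requalification:

    V1_0::Request request = {};
    // ... populate request.inputs, request.outputs and request.pools ...
    preparedModel->executeSynchronously(request, MeasureTiming::NO,
        [](V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing&)
        {
            ALOGI("executeSynchronously returned %d with %zu output shapes",
                  static_cast<int>(status), outputShapes.size());
        });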
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index eea70d7..997c9cc 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -35,6 +35,10 @@
 /// Helper classes
 ///
 
+#ifdef ARMNN_ANDROID_R
+using OperandType = android::nn::hal::OperandType;
+#endif
+
 struct ConversionData
 {
     ConversionData(const std::vector<armnn::BackendId>& backends)
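
The alias above is what lets unqualified OperandType references in the shared conversion code keep compiling on R, where the type lives under android::nn::hal. Illustrative only (IsTensorFloat32 is a hypothetical helper, not part of the patch):

    bool IsTensorFloat32(OperandType type)
    {
        // Resolves to android::nn::hal::OperandType on R builds,
        // and to the pre-R type otherwise.
        return type == OperandType::TENSOR_FLOAT32;
    }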
diff --git a/Utils.cpp b/Utils.cpp
index 3583d62..c95f6e1 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -69,13 +69,7 @@
 
     const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
 
-    // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
-    // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
-#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) // Use the new Android implementation.
     uint8_t* memPoolBuffer = memPool.getBuffer();
-#else // Fallback to the old Android O implementation.
-    uint8_t* memPoolBuffer = memPool.buffer;
-#endif
 
     uint8_t* memory = memPoolBuffer + location.offset;
 
diff --git a/Utils.hpp b/Utils.hpp
index c5a2e65..6256655 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -27,6 +27,10 @@
 namespace armnn_driver
 {
 
+#ifdef ARMNN_ANDROID_R
+using DataLocation = ::android::nn::hal::DataLocation;
+#endif
+
 extern const armnn::PermutationVector g_DontPermute;
 
 template <typename OperandType>
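
As with OperandType above, the DataLocation alias keeps existing Utils signatures compiling on R. For illustration, the pool accessor whose body appears in the Utils.cpp hunk is declared along these lines:

    // Returns a pointer into the pool selected by location.poolIndex,
    // offset by location.offset (see the Utils.cpp hunk above).
    void* GetMemoryFromPool(DataLocation location,
                            const std::vector<android::nn::RunTimePoolInfo>& memPools);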
diff --git a/androidnn.go b/androidnn.go
deleted file mode 100644
index 92b7b2a..0000000
--- a/androidnn.go
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright © 2017 ARM Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-package armnn_nn_driver
-
-import (
-    "android/soong/android"
-    "android/soong/cc"
-)
-
-func globalFlags(ctx android.BaseContext) []string {
-    var cppflags []string
-
-    if ctx.AConfig().PlatformVersionName() == "Q" || ctx.AConfig().PlatformVersionName() == "10" {
-        cppflags = append(cppflags, "-fno-addrsig")
-    }
-
-    return cppflags
-}
-
-func armnnNNDriverDefaults(ctx android.LoadHookContext) {
-        type props struct {
-                Cppflags []string
-        }
-
-        p := &props{}
-        p.Cppflags = globalFlags(ctx)
-
-        ctx.AppendProperties(p)
-}
-
-func init() {
-
-  android.RegisterModuleType("armnn_nn_driver_defaults", armnnNNDriverDefaultsFactory)
-}
-
-func armnnNNDriverDefaultsFactory() android.Module {
-
-   module := cc.DefaultsFactory()
-   android.AddLoadHook(module, armnnNNDriverDefaults)
-   return module
-}
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
index 529371e..10ca3ae 100644
--- a/test/1.1/Mean.cpp
+++ b/test/1.1/Mean.cpp
@@ -78,7 +78,7 @@
     outArg.dimensions      = expectedOutput.GetDimensions();
 
     // Make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{ inArg };
     request.outputs = hidl_vec<RequestArgument>{ outArg };
 
@@ -89,8 +89,8 @@
     android::sp<IMemory> outMemory = AddPoolAndGetData<float>(expectedOutput.GetNumElements(), request);
     const float* outputData = static_cast<const float*>(static_cast<void*>(outMemory->getPointer()));
 
-    ErrorStatus execStatus = Execute(preparedModel, request);
-    BOOST_TEST(execStatus == ErrorStatus::NONE);
+    V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
+    BOOST_TEST(execStatus == V1_0::ErrorStatus::NONE);
 
     const float* expectedOutputData = expectedOutput.GetData();
     for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp
index 4d4238b..5679ca2 100644
--- a/test/1.1/Transpose.cpp
+++ b/test/1.1/Transpose.cpp
@@ -76,7 +76,7 @@
     output.dimensions = expectedOutputTensor.GetDimensions();
 
     // make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
index 8a769db..2bbd7be 100644
--- a/test/1.2/Capabilities.cpp
+++ b/test/1.2/Capabilities.cpp
@@ -57,8 +57,9 @@
     }
 };
 
-void CheckOperandType(const V1_2::Capabilities& capabilities, OperandType type, float execTime, float powerUsage)
+void CheckOperandType(const V1_2::Capabilities& capabilities, V1_2::OperandType type, float execTime, float powerUsage)
 {
+    using namespace armnn_driver::hal_1_2;
     PerformanceInfo perfInfo = android::nn::lookup(capabilities.operandPerformance, type);
     BOOST_ASSERT(perfInfo.execTime == execTime);
     BOOST_ASSERT(perfInfo.powerUsage == powerUsage);
@@ -71,28 +72,28 @@
     using namespace armnn_driver::hal_1_2;
     using namespace android::nn;
 
-    auto getCapabilitiesFn = [&](ErrorStatus error, const V1_2::Capabilities& capabilities)
+    auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
         {
-            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT32, 2.0f, 2.1f);
-            CheckOperandType(capabilities, OperandType::FLOAT32, 2.2f, 2.3f);
-            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT16, 2.4f, 2.5f);
-            CheckOperandType(capabilities, OperandType::FLOAT16, 2.6f, 2.7f);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_ASYMM, 2.8f, 2.9f);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_SYMM, 3.0f, 3.1f);
-            CheckOperandType(capabilities, OperandType::TENSOR_INT32, 3.2f, 3.3f);
-            CheckOperandType(capabilities, OperandType::INT32, 3.4f, 3.5f);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM, 2.8f, 2.9f);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, 2.8f, 2.9f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT32, 2.0f, 2.1f);
+            CheckOperandType(capabilities, V1_2::OperandType::FLOAT32, 2.2f, 2.3f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT16, 2.4f, 2.5f);
+            CheckOperandType(capabilities, V1_2::OperandType::FLOAT16, 2.6f, 2.7f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_ASYMM, 2.8f, 2.9f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_SYMM, 3.0f, 3.1f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_INT32, 3.2f, 3.3f);
+            CheckOperandType(capabilities, V1_2::OperandType::INT32, 3.4f, 3.5f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_SYMM, 2.8f, 2.9f);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, 2.8f, 2.9f);
 
             // Unsupported operands take FLT_MAX value
-            CheckOperandType(capabilities, OperandType::UINT32, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::BOOL, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::OEM, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::UINT32, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::BOOL, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
 
-            BOOST_ASSERT(error == ErrorStatus::NONE);
+            BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
         };
 
     __system_property_set("Armnn.operandTypeTensorFloat32Performance.execTime", "2.0f");
@@ -129,28 +130,31 @@
 
     float defaultValue = .1f;
 
-    auto getCapabilitiesFn = [&](ErrorStatus error, const V1_2::Capabilities& capabilities)
+    auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
         {
-            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT32, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::FLOAT32, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::TENSOR_FLOAT16, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::FLOAT16, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_ASYMM, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_SYMM, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::TENSOR_INT32, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::INT32, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL, defaultValue, defaultValue);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT8_SYMM, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::FLOAT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT16, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::FLOAT16, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_ASYMM, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_SYMM, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_INT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::INT32, defaultValue, defaultValue);
+            CheckOperandType(capabilities,
+                             V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
+                             defaultValue,
+                             defaultValue);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT8_SYMM, defaultValue, defaultValue);
 
             // Unsupported operands take FLT_MAX value
-            CheckOperandType(capabilities, OperandType::UINT32, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::BOOL, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::OEM, FLT_MAX, FLT_MAX);
-            CheckOperandType(capabilities, OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::UINT32, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::BOOL, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_QUANT16_ASYMM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_BOOL8, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::OEM, FLT_MAX, FLT_MAX);
+            CheckOperandType(capabilities, V1_2::OperandType::TENSOR_OEM_BYTE, FLT_MAX, FLT_MAX);
 
-            BOOST_ASSERT(error == ErrorStatus::NONE);
+            BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
         };
 
     armnn::IRuntime::CreationOptions options;
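
A hedged sketch of how such a callback is exercised (driver is assumed to be a heap-allocated ArmnnDriver, as elsewhere in the tests; getCapabilities_1_2 is the HAL 1.2 entry point the lambda is passed to):

    auto getCapabilitiesFn = [&](V1_0::ErrorStatus error, const V1_2::Capabilities& capabilities)
    {
        CheckOperandType(capabilities, V1_2::OperandType::TENSOR_FLOAT32, 2.0f, 2.1f);
        BOOST_ASSERT(error == V1_0::ErrorStatus::NONE);
    };
    driver->getCapabilities_1_2(getCapabilitiesFn);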
diff --git a/test/Android.mk b/test/Android.mk
index 13a36b5..0448d18 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -42,12 +42,11 @@
         -O0 \
         -UNDEBUG
 
-ifeq ($(P_OR_LATER),1)
-# Required to build with the changes made to the Android ML framework starting from Android P,
-# regardless of the HAL version used for the build.
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(R_OR_LATER),1)
 LOCAL_CFLAGS+= \
-        -DARMNN_ANDROID_P
-endif # PLATFORM_VERSION == 9
+        -DARMNN_ANDROID_R
+endif # R or later
 
 ifeq ($(Q_OR_LATER),1)
 LOCAL_CFLAGS += \
@@ -108,6 +107,11 @@
         android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -148,9 +152,14 @@
         -Werror \
         -O0 \
         -UNDEBUG \
-        -DARMNN_ANDROID_P \
         -DARMNN_ANDROID_NN_V1_1
 
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(R_OR_LATER),1)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # R or later
+
 ifeq ($(Q_OR_LATER),1)
 LOCAL_CFLAGS += \
         -DBOOST_NO_AUTO_PTR
@@ -207,6 +216,11 @@
         android.hardware.neuralnetworks@1.2
 endif # PLATFORM_VERSION == Q
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -245,13 +259,14 @@
         -Werror \
         -O0 \
         -UNDEBUG \
-        -DARMNN_ANDROID_Q \
+        -DBOOST_NO_AUTO_PTR \
         -DARMNN_ANDROID_NN_V1_2
 
-ifeq ($(Q_OR_LATER),1)
-LOCAL_CFLAGS += \
-        -DBOOST_NO_AUTO_PTR
-endif # PLATFORM_VERSION == Q or later
+# Required to build with the changes made to the Android ML framework specific to Android R
+ifeq ($(R_OR_LATER),1)
+LOCAL_CFLAGS+= \
+        -DARMNN_ANDROID_R
+endif # R or later
 
 LOCAL_SRC_FILES := \
         1.0/Convolution2D.cpp \
@@ -303,6 +318,11 @@
         android.hidl.allocator@1.0 \
         android.hidl.memory@1.0
 
+ifeq ($(R_OR_LATER),1)
+LOCAL_SHARED_LIBRARIES+= \
+        android.hardware.neuralnetworks@1.3
+endif # R or later
+
 ifeq ($(ARMNN_COMPUTE_CL_ENABLED),1)
 LOCAL_SHARED_LIBRARIES+= \
         libOpenCL
@@ -310,4 +330,4 @@
 
 include $(BUILD_EXECUTABLE)
 
-endif # PLATFORM_VERSION == Q
+endif # PLATFORM_VERSION == Q
\ No newline at end of file
diff --git a/test/Concat.cpp b/test/Concat.cpp
index 9beb67b..b99e31c 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -35,8 +35,8 @@
                 int32_t concatAxis,
                 const TestTensor & expectedOutputTensor,
                 armnn::Compute computeDevice,
-                ErrorStatus expectedPrepareStatus=ErrorStatus::NONE,
-                ErrorStatus expectedExecStatus=ErrorStatus::NONE)
+                V1_0::ErrorStatus expectedPrepareStatus=V1_0::ErrorStatus::NONE,
+                V1_0::ErrorStatus expectedExecStatus=V1_0::ErrorStatus::NONE)
 {
     std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
     HalPolicy::Model model{};
@@ -59,13 +59,13 @@
     model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
 
     // make the prepared model
-    ErrorStatus prepareStatus=ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus=V1_0::ErrorStatus::NONE;
     android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
                                                                              *driver,
                                                                              prepareStatus,
                                                                              expectedPrepareStatus);
     BOOST_TEST(prepareStatus == expectedPrepareStatus);
-    if (prepareStatus != ErrorStatus::NONE)
+    if (prepareStatus != V1_0::ErrorStatus::NONE)
     {
         // prepare failed, we cannot continue
         return;
@@ -111,7 +111,7 @@
     }
 
     // make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
 
@@ -131,7 +131,7 @@
     auto execStatus = Execute(preparedModel, request, expectedExecStatus);
     BOOST_TEST(execStatus == expectedExecStatus);
 
-    if (execStatus == ErrorStatus::NONE)
+    if (execStatus == V1_0::ErrorStatus::NONE)
     {
         // check the result if there was no error
         const float * expectedOutput = expectedOutputTensor.GetData();
@@ -310,7 +310,7 @@
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
 }
 
@@ -323,7 +323,7 @@
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
 }
 
@@ -333,7 +333,7 @@
     TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
 
     // We need at least two tensors to concatenate
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn}, axis, aIn, sample, expectedParserStatus);
 }
 
@@ -350,7 +350,7 @@
                                                      2, 3, 7, 8, 9, 11}};
 
     // The input dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, sample, expectedParserStatus);
 }
 
@@ -362,7 +362,7 @@
     TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
 
     // The input dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn}, axis, expected, sample, expectedParserStatus);
 }
 
@@ -380,7 +380,7 @@
                                                        2, 3, 7, 8, 9, 11}};
 
     // The input and output dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
 }
 
@@ -398,7 +398,7 @@
                                                    2, 3, 7, 8, 9, 11}};
 
     // The input and output ranks must match
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
 }
 
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 9fe6f46..ecf25e1 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -75,7 +75,7 @@
     output.dimensions = hidl_vec<uint32_t>{};
 
     // build the requests
-    Request requests[maxRequests];
+    V1_0::Request requests[maxRequests];
     android::sp<IMemory> outMemory[maxRequests];
     float* outdata[maxRequests];
     for (size_t i = 0; i < maxRequests; ++i)
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index 180f57e..002677f 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -93,7 +93,7 @@
     output.location        = outloc;
     output.dimensions      = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 3a3c98f..0bc0cf7 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -15,7 +15,7 @@
 namespace V1_0
 {
 
-std::ostream& operator<<(std::ostream& os, ErrorStatus stat)
+std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat)
 {
    return os << static_cast<int>(stat);
 }
@@ -31,7 +31,7 @@
 using namespace android::hardware;
 using namespace armnn_driver;
 
-Return<void> ExecutionCallback::notify(ErrorStatus status)
+Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status)
 {
     (void)status;
     ALOGI("ExecutionCallback::notify invoked");
@@ -53,7 +53,7 @@
     return Void();
 }
 
-Return<void> PreparedModelCallback::notify(ErrorStatus status,
+Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
                                            const android::sp<V1_0::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
@@ -63,7 +63,7 @@
 
 #ifdef ARMNN_ANDROID_NN_V1_2
 
-Return<void> PreparedModelCallback_1_2::notify(ErrorStatus status,
+Return<void> PreparedModelCallback_1_2::notify(V1_0::ErrorStatus status,
                                                const android::sp<V1_0::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
@@ -71,7 +71,7 @@
     return Void();
 }
 
-Return<void> PreparedModelCallback_1_2::notify_1_2(ErrorStatus status,
+Return<void> PreparedModelCallback_1_2::notify_1_2(V1_0::ErrorStatus status,
                                                    const android::sp<V1_2::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
@@ -104,15 +104,15 @@
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus)
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel(model, cb);
 
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
-    if (expectedStatus == ErrorStatus::NONE)
+    if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
         BOOST_TEST((cb->GetPreparedModel() != nullptr));
     }
@@ -123,15 +123,15 @@
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus)
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
 
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
-    if (expectedStatus == ErrorStatus::NONE)
+    if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
         BOOST_TEST((cb->GetPreparedModel() != nullptr));
     }
@@ -144,8 +144,8 @@
 
 android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                              armnn_driver::ArmnnDriver& driver,
-                                                             ErrorStatus& prepareStatus,
-                                                             ErrorStatus expectedStatus)
+                                                             V1_0::ErrorStatus& prepareStatus,
+                                                             V1_0::ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback_1_2> cb(new PreparedModelCallback_1_2());
 
@@ -157,7 +157,7 @@
 
     prepareStatus = cb->GetErrorStatus();
     BOOST_TEST(prepareStatus == expectedStatus);
-    if (expectedStatus == ErrorStatus::NONE)
+    if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
         BOOST_TEST((cb->GetPreparedModel_1_2() != nullptr));
     }
@@ -166,23 +166,24 @@
 
 #endif
 
-ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
-                    const Request& request,
-                    ErrorStatus expectedStatus)
+V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
+                          const V1_0::Request& request,
+                          V1_0::ErrorStatus expectedStatus)
 {
     BOOST_TEST(preparedModel.get() != nullptr);
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
-    ErrorStatus execStatus = preparedModel->execute(request, cb);
+    V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
     BOOST_TEST(execStatus == expectedStatus);
     ALOGI("Execute: waiting for callback to be invoked");
     cb->wait();
     return execStatus;
 }
 
-android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel, const Request& request)
+android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
+                                             const V1_0::Request& request)
 {
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
-    BOOST_TEST(preparedModel->execute(request, cb) == ErrorStatus::NONE);
+    BOOST_TEST(preparedModel->execute(request, cb) == V1_0::ErrorStatus::NONE);
     ALOGI("ExecuteNoWait: returning callback object");
     return cb;
 }
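
Usage sketch for the requalified helpers (model preparation elided); this is the fire-and-wait pattern the concurrency tests rely on:

    V1_0::Request request = {};
    // ... populate request from the prepared model's inputs and outputs ...
    android::sp<ExecutionCallback> cb = driverTestHelpers::ExecuteNoWait(preparedModel, request);
    cb->wait();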
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 9da0260..7a35b23 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -12,6 +12,10 @@
 #include <iosfwd>
 #include <boost/test/unit_test.hpp>
 
+#include <android/hidl/allocator/1.0/IAllocator.h>
+
+using ::android::hidl::allocator::V1_0::IAllocator;
+
 namespace android
 {
 namespace hardware
@@ -21,7 +25,7 @@
 namespace V1_0
 {
 
-std::ostream& operator<<(std::ostream& os, ErrorStatus stat);
+std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
 
 } // namespace android::hardware::neuralnetworks::V1_0
 } // namespace android::hardware::neuralnetworks
@@ -36,7 +40,7 @@
 struct ExecutionCallback : public V1_0::IExecutionCallback
 {
     ExecutionCallback() : mNotified(false) {}
-    Return<void> notify(ErrorStatus status) override;
+    Return<void> notify(V1_0::ErrorStatus status) override;
     /// wait until the callback has notified us that it is done
     Return<void> wait();
 
@@ -52,18 +56,18 @@
 {
 public:
     PreparedModelCallback()
-        : m_ErrorStatus(ErrorStatus::NONE)
+        : m_ErrorStatus(V1_0::ErrorStatus::NONE)
         , m_PreparedModel()
     { }
     ~PreparedModelCallback() override { }
 
-    Return<void> notify(ErrorStatus status,
+    Return<void> notify(V1_0::ErrorStatus status,
                         const android::sp<V1_0::IPreparedModel>& preparedModel) override;
-    ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
+    V1_0::ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
     android::sp<V1_0::IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
 
 private:
-    ErrorStatus                  m_ErrorStatus;
+    V1_0::ErrorStatus                  m_ErrorStatus;
     android::sp<V1_0::IPreparedModel>  m_PreparedModel;
 };
 
@@ -73,24 +77,24 @@
 {
 public:
     PreparedModelCallback_1_2()
-            : m_ErrorStatus(ErrorStatus::NONE)
+            : m_ErrorStatus(V1_0::ErrorStatus::NONE)
             , m_PreparedModel()
             , m_PreparedModel_1_2()
     { }
     ~PreparedModelCallback_1_2() override { }
 
-    Return<void> notify(ErrorStatus status, const android::sp<V1_0::IPreparedModel>& preparedModel) override;
+    Return<void> notify(V1_0::ErrorStatus status, const android::sp<V1_0::IPreparedModel>& preparedModel) override;
 
-    Return<void> notify_1_2(ErrorStatus status, const android::sp<V1_2::IPreparedModel>& preparedModel) override;
+    Return<void> notify_1_2(V1_0::ErrorStatus status, const android::sp<V1_2::IPreparedModel>& preparedModel) override;
 
-    ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
+    V1_0::ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
 
     android::sp<V1_0::IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
 
     android::sp<V1_2::IPreparedModel> GetPreparedModel_1_2() { return m_PreparedModel_1_2; }
 
 private:
-    ErrorStatus                        m_ErrorStatus;
+    V1_0::ErrorStatus                  m_ErrorStatus;
     android::sp<V1_0::IPreparedModel>  m_PreparedModel;
     android::sp<V1_2::IPreparedModel>  m_PreparedModel_1_2;
 };
@@ -100,7 +104,7 @@
 hidl_memory allocateSharedMemory(int64_t size);
 
 template<typename T>
-android::sp<IMemory> AddPoolAndGetData(uint32_t size, Request& request)
+android::sp<IMemory> AddPoolAndGetData(uint32_t size, V1_0::Request& request)
 {
     hidl_memory pool;
 
@@ -119,7 +123,7 @@
 }
 
 template<typename T>
-void AddPoolAndSetData(uint32_t size, Request& request, const T* data)
+void AddPoolAndSetData(uint32_t size, V1_0::Request& request, const T* data)
 {
     android::sp<IMemory> memory = AddPoolAndGetData<T>(size, request);
 
@@ -201,7 +205,7 @@
                       const hidl_vec<uint32_t>& dimensions,
                       const T* values,
                       HalOperandType operandType = HalOperandType::TENSOR_FLOAT32,
-                      HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY,
+                      HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
                       double scale = 0.f,
                       int offset = 0)
 {
@@ -247,7 +251,7 @@
                       const hidl_vec<uint32_t>& dimensions,
                       const std::vector<T>& values,
                       HalOperandType operandType = HalPolicy::OperandType::TENSOR_FLOAT32,
-                      HalOperandLifeTime operandLifeTime = HalOperandLifeTime::CONSTANT_COPY,
+                      HalOperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY,
                       double scale = 0.f,
                       int offset = 0)
 {
@@ -306,15 +310,15 @@
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus = ErrorStatus::NONE);
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 #if defined(ARMNN_ANDROID_NN_V1_1) || defined(ARMNN_ANDROID_NN_V1_2)
 
 android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
                                                          armnn_driver::ArmnnDriver& driver,
-                                                         ErrorStatus& prepareStatus,
-                                                         ErrorStatus expectedStatus = ErrorStatus::NONE);
+                                                         V1_0::ErrorStatus& prepareStatus,
+                                                         V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 #endif
 
@@ -322,7 +326,7 @@
 android::sp<V1_0::IPreparedModel> PrepareModel(const HalModel& model,
                                                armnn_driver::ArmnnDriver& driver)
 {
-    ErrorStatus prepareStatus = ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
     return PrepareModelWithStatus(model, driver, prepareStatus);
 }
 
@@ -330,25 +334,25 @@
 
 android::sp<V1_2::IPreparedModel> PrepareModelWithStatus_1_2(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                             armnn_driver::ArmnnDriver& driver,
-                                                            ErrorStatus& prepareStatus,
-                                                            ErrorStatus expectedStatus = ErrorStatus::NONE);
+                                                            V1_0::ErrorStatus& prepareStatus,
+                                                            V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 template<typename HalModel>
 android::sp<V1_2::IPreparedModel> PrepareModel_1_2(const HalModel& model,
                                                    armnn_driver::ArmnnDriver& driver)
 {
-    ErrorStatus prepareStatus = ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
     return PrepareModelWithStatus_1_2(model, driver, prepareStatus);
 }
 
 #endif
 
 
-ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
-                    const Request& request,
-                    ErrorStatus expectedStatus = ErrorStatus::NONE);
+V1_0::ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
+                          const V1_0::Request& request,
+                          V1_0::ErrorStatus expectedStatus = V1_0::ErrorStatus::NONE);
 
 android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
-                                             const Request& request);
+                                             const V1_0::Request& request);
 
 } // namespace driverTestHelpers
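
The new IAllocator include backs the allocateSharedMemory() helper declared above. A sketch of the usual pattern, assuming the "ashmem" allocator service (this mirrors, rather than reproduces, the helper's implementation):

    hidl_memory allocateSharedMemory(int64_t size)
    {
        hidl_memory memory;
        android::sp<IAllocator> allocator = IAllocator::getService("ashmem");
        allocator->allocate(size, [&](bool success, const hidl_memory& mem)
        {
            if (success)
            {
                memory = mem;
            }
        });
        return memory;
    }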
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index e8b5dc2..a6983dd 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -64,7 +64,7 @@
     output.location  = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
@@ -87,10 +87,10 @@
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
+    V1_0::ErrorStatus error;
     std::vector<bool> sup;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    ArmnnDriver::getSupportedOperations_cb cb = [&](V1_0::ErrorStatus status, const std::vector<bool>& supported)
         {
             error = status;
             sup = supported;
@@ -143,7 +143,7 @@
     output.location  = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
@@ -173,10 +173,10 @@
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
+    V1_0::ErrorStatus error;
     std::vector<bool> sup;
 
-    ArmnnDriver::getSupportedOperations_cb cb = [&](ErrorStatus status, const std::vector<bool>& supported)
+    ArmnnDriver::getSupportedOperations_cb cb = [&](V1_0::ErrorStatus status, const std::vector<bool>& supported)
         {
             error = status;
             sup = supported;
@@ -229,7 +229,7 @@
     output.location  = outloc;
     output.dimensions = hidl_vec<uint32_t>{};
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = hidl_vec<RequestArgument>{input};
     request.outputs = hidl_vec<RequestArgument>{output};
 
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 3788e66..961ab16 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -22,10 +22,10 @@
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     std::vector<bool> supported;
 
-    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
+    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
         errorStatus = _errorStatus;
         supported = _supported;
@@ -52,7 +52,7 @@
     model0.operations[0].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model0, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)1);
     BOOST_TEST(supported[0] == true);
 
@@ -81,19 +81,8 @@
 
     driver->getSupportedOperations(model1, cb);
 
-#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q)
-    // In Android P, android::nn::validateModel returns INVALID_ARGUMENT, because of the wrong number of inputs for the
-    // fully connected layer (1 instead of 4)
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
     BOOST_TEST(supported.empty());
-#else
-    // In Android O, android::nn::validateModel indicates that the second (wrong) fully connected layer in unsupported
-    // in the vector of flags returned by the callback
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
-    BOOST_TEST(supported.size() == (size_t)2);
-    BOOST_TEST(supported[0] == true);
-    BOOST_TEST(supported[1] == false);
-#endif
 
     // Test Broadcast on add/mul operators
     HalPolicy::Model model2 = {};
@@ -115,7 +104,7 @@
     model2.operations[1].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model2, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)2);
     BOOST_TEST(supported[0] == true);
     BOOST_TEST(supported[1] == true);
@@ -144,7 +133,7 @@
     model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
 
     driver->getSupportedOperations(model3, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)1);
     BOOST_TEST(supported[0] == false);
 
@@ -159,7 +148,7 @@
     model4.operations[0].outputs = hidl_vec<uint32_t>{0};
 
     driver->getSupportedOperations(model4, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::INVALID_ARGUMENT);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
     BOOST_TEST(supported.empty());
 }
 
@@ -170,10 +159,10 @@
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     std::vector<bool> supported;
 
-    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
+    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
         errorStatus = _errorStatus;
         supported = _supported;
@@ -233,7 +222,7 @@
 
     // We are testing that the unsupported layers return false and the test continues rather than failing and stopping
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)errorStatus == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.size() == (size_t)3);
     BOOST_TEST(supported[0] == false);
     BOOST_TEST(supported[1] == true);
@@ -246,10 +235,10 @@
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus errorStatus;
+    V1_0::ErrorStatus errorStatus;
     std::vector<bool> supported;
 
-    auto cb = [&](ErrorStatus _errorStatus, const std::vector<bool>& _supported)
+    auto cb = [&](V1_0::ErrorStatus _errorStatus, const std::vector<bool>& _supported)
     {
         errorStatus = _errorStatus;
         supported = _supported;
@@ -261,7 +250,7 @@
 
     // Memory pool mapping should fail, we should report an error
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)errorStatus != (int)ErrorStatus::NONE);
+    BOOST_TEST((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(supported.empty());
 }
 
diff --git a/test/Lstm.hpp b/test/Lstm.hpp
index f0d3d85..d3e03d7 100644
--- a/test/Lstm.hpp
+++ b/test/Lstm.hpp
@@ -54,18 +54,18 @@
 
 // Helper function to create an OperandLifeTime::NO_VALUE for testing.
 // To be used on optional input operands that have no values - these are valid and should be tested.
-OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
+V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
 {
     // Only create a NO_VALUE for optional operands that have no elements
     if (dimensions.size() == 0 || dimensions[0] == 0)
     {
-        return OperandLifeTime::NO_VALUE;
+        return V1_0::OperandLifeTime::NO_VALUE;
     }
-    return OperandLifeTime::CONSTANT_COPY;
+    return V1_0::OperandLifeTime::CONSTANT_COPY;
 }
 
 template<typename HalModel>
-void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const Request& request)
+void ExecuteModel(const HalModel& model, armnn_driver::ArmnnDriver& driver, const V1_0::Request& request)
 {
     android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, driver);
     if (preparedModel.get() != nullptr)
@@ -79,7 +79,7 @@
 template<>
 void ExecuteModel<armnn_driver::hal_1_2::HalPolicy::Model>(const armnn_driver::hal_1_2::HalPolicy::Model& model,
                                                            armnn_driver::ArmnnDriver& driver,
-                                                           const Request& request)
+                                                           const V1_0::Request& request)
 {
     android::sp<V1_2::IPreparedModel> preparedModel = PrepareModel_1_2(model, driver);
     if (preparedModel.get() != nullptr)
@@ -362,7 +362,7 @@
     outputArguments[2] = CreateRequestArgument<float>(cellStateOutValue, 5);
     outputArguments[3] = CreateRequestArgument<float>(outputValue, 6);
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
 
@@ -640,7 +640,7 @@
     outputArguments[0] = CreateRequestArgument<int16_t>(cellStateOutValue, 3);
     outputArguments[1] = CreateRequestArgument<uint8_t>(outputValue, 4);
 
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
 
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 5c388cb..3b629a7 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -30,10 +30,10 @@
     // Making the driver object on the stack causes a weird libc error, so make it on the heap instead
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
-    ErrorStatus error;
+    V1_0::ErrorStatus error;
     V1_0::Capabilities cap;
 
-    auto cb = [&](ErrorStatus status, const V1_0::Capabilities& capabilities)
+    auto cb = [&](V1_0::ErrorStatus status, const V1_0::Capabilities& capabilities)
     {
         error = status;
         cap = capabilities;
@@ -41,7 +41,7 @@
 
     driver->getCapabilities(cb);
 
-    BOOST_TEST((int)error == (int)ErrorStatus::NONE);
+    BOOST_TEST((int)error == (int)V1_0::ErrorStatus::NONE);
     BOOST_TEST(cap.float32Performance.execTime > 0.f);
     BOOST_TEST(cap.float32Performance.powerUsage > 0.f);
     BOOST_TEST(cap.quantized8Performance.execTime > 0.f);