IVGCVSW-4473 Android R pre-HAL 1.3 build changes

* Update ErrorStatus to V1_0::ErrorStatus
* Update Request to V1_0::Request
* Update OperandType to V1_2::OperandType
* Add a using-directive for android::nn::hal in ArmnnDriverImpl (R only)
* Add missing g_RelaxedFloat32toFloat16PerformancePowerUsageName
* Add V1_0 or V1_1 namespace qualifications where necessary (pattern sketched below)
* Update Android.mk with R macro and android.hardware.neuralnetworks@1.3
* Remove androidnn.go
* Include IAllocator in DriverTestHelpers
* Remove unused LOCAL_CFLAGS

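The qualification pattern, in brief: HIDL types that previously
resolved through using-directives are spelled with an explicit
interface version, so the same code builds against both the Q and the
R NNAPI headers. A minimal sketch (the V1_0 alias is the one this
patch adds in 1.2/ArmnnDriverImpl.hpp; DoPrepare is a hypothetical
function used only for illustration):

    namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;

    // Before: unqualified, resolved via a using-directive.
    // Return<ErrorStatus> DoPrepare(const Request& request);

    // After: the interface version is explicit.
    Return<V1_0::ErrorStatus> DoPrepare(const V1_0::Request& request);
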
Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I1787f1ed6784b3bbec017536d87d49197405e853
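
Where R changed an API shape rather than just a name, the new
ARMNN_ANDROID_R macro (defined for R builds by the Android.mk update
above) selects the variant at compile time. For example, in
getCapabilities_1_2 below, nonExtensionOperandPerformance takes a
HalVersion template argument on R:

    #ifdef ARMNN_ANDROID_R
    capabilities.operandPerformance =
        nonExtensionOperandPerformance<HalVersion::V1_2>({FLT_MAX, FLT_MAX});
    #else
    capabilities.operandPerformance =
        nonExtensionOperandPerformance({FLT_MAX, FLT_MAX});
    #endif
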
diff --git a/1.2/ArmnnDriver.hpp b/1.2/ArmnnDriver.hpp
index 177cab6..6dba2e9 100644
--- a/1.2/ArmnnDriver.hpp
+++ b/1.2/ArmnnDriver.hpp
@@ -29,6 +29,7 @@
 class ArmnnDriver : public ArmnnDevice, public V1_2::IDevice
 {
 public:
+
     ArmnnDriver(DriverOptions options)
         : ArmnnDevice(std::move(options))
     {
@@ -57,8 +58,8 @@
                                                                                          cb);
     }
 
-    Return<ErrorStatus> prepareModel(const V1_0::Model& model,
-                                     const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel(const V1_0::Model& model,
+                                           const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_2::ArmnnDriver::prepareModel()");
 
@@ -86,9 +87,9 @@
                                                                                          cb);
     }
 
-    Return<ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
-                                         V1_1::ExecutionPreference preference,
-                                         const android::sp<V1_0::IPreparedModelCallback>& cb) override
+    Return<V1_0::ErrorStatus> prepareModel_1_1(const V1_1::Model& model,
+                                               V1_1::ExecutionPreference preference,
+                                               const android::sp<V1_0::IPreparedModelCallback>& cb) override
     {
         ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1()");
 
@@ -97,8 +98,8 @@
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
             ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_1: Invalid execution preference");
-            cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
-            return ErrorStatus::INVALID_ARGUMENT;
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
         }
 
         return armnn_driver::ArmnnDriverImpl<hal_1_1::HalPolicy>::prepareModel(m_Runtime,
@@ -121,7 +122,7 @@
     {
         ALOGV("hal_1_2::ArmnnDriver::getVersionString()");
 
-        cb(ErrorStatus::NONE, "ArmNN");
+        cb(V1_0::ErrorStatus::NONE, "ArmNN");
         return Void();
     }
 
@@ -129,22 +130,22 @@
     {
         ALOGV("hal_1_2::ArmnnDriver::getType()");
 
-        cb(ErrorStatus::NONE, V1_2::DeviceType::CPU);
+        cb(V1_0::ErrorStatus::NONE, V1_2::DeviceType::CPU);
         return Void();
     }
 
-    Return<ErrorStatus> prepareModelFromCache(
+    Return<V1_0::ErrorStatus> prepareModelFromCache(
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
             const HidlToken&,
             const sp<V1_2::IPreparedModelCallback>& callback)
     {
         ALOGV("hal_1_2::ArmnnDriver::prepareModelFromCache()");
-        callback->notify_1_2(ErrorStatus::GENERAL_FAILURE, nullptr);
-        return ErrorStatus::GENERAL_FAILURE;
+        callback->notify_1_2(V1_0::ErrorStatus::GENERAL_FAILURE, nullptr);
+        return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
 
-    Return<ErrorStatus> prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference,
+    Return<V1_0::ErrorStatus> prepareModel_1_2(const V1_2::Model& model, V1_1::ExecutionPreference preference,
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&,
             const android::hardware::hidl_vec<android::hardware::hidl_handle>&, const HidlToken&,
             const android::sp<V1_2::IPreparedModelCallback>& cb)
@@ -156,8 +157,8 @@
               preference == ExecutionPreference::SUSTAINED_SPEED))
         {
             ALOGV("hal_1_2::ArmnnDriver::prepareModel_1_2: Invalid execution preference");
-            cb->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
-            return ErrorStatus::INVALID_ARGUMENT;
+            cb->notify(V1_0::ErrorStatus::INVALID_ARGUMENT, nullptr);
+            return V1_0::ErrorStatus::INVALID_ARGUMENT;
         }
 
         return ArmnnDriverImpl::prepareArmnnModel_1_2(m_Runtime,
@@ -172,7 +173,7 @@
     Return<void> getSupportedExtensions(getSupportedExtensions_cb cb)
     {
         ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()");
-        cb(ErrorStatus::NONE, {/* No extensions. */});
+        cb(V1_0::ErrorStatus::NONE, {/* No extensions. */});
         return Void();
     }
 
@@ -199,7 +200,7 @@
         ALOGV("hal_1_2::ArmnnDriver::getSupportedExtensions()");
 
         // Set both numbers to be 0 for cache not supported.
-        cb(ErrorStatus::NONE, 0, 0);
+        cb(V1_0::ErrorStatus::NONE, 0, 0);
         return Void();
     }
 };
diff --git a/1.2/ArmnnDriverImpl.cpp b/1.2/ArmnnDriverImpl.cpp
index 691156f..bfa730b 100644
--- a/1.2/ArmnnDriverImpl.cpp
+++ b/1.2/ArmnnDriverImpl.cpp
@@ -57,7 +57,7 @@
 
 
 void NotifyCallbackAndCheck(const sp<V1_2::IPreparedModelCallback>& callback,
-                            ErrorStatus errorStatus,
+                            V1_0::ErrorStatus errorStatus,
                             const sp<V1_2::IPreparedModel>& preparedModelPtr)
 {
     Return<void> returned = callback->notify_1_2(errorStatus, preparedModelPtr);
@@ -69,9 +69,9 @@
     }
 }
 
-Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
-                                     const std::string& message,
-                                     const sp<V1_2::IPreparedModelCallback>& callback)
+Return<V1_0::ErrorStatus> FailPrepareModel(V1_0::ErrorStatus error,
+                                           const std::string& message,
+                                           const sp<V1_2::IPreparedModelCallback>& callback)
 {
     ALOGW("ArmnnDriverImpl::prepareModel: %s", message.c_str());
     NotifyCallbackAndCheck(callback, error, nullptr);
@@ -85,29 +85,30 @@
 namespace hal_1_2
 {
 
-Return<ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
-                                                           const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
-                                                           const DriverOptions& options,
-                                                           const V1_2::Model& model,
-                                                           const sp<V1_2::IPreparedModelCallback>& cb,
-                                                           bool float32ToFloat16)
+Return<V1_0::ErrorStatus> ArmnnDriverImpl::prepareArmnnModel_1_2(
+       const armnn::IRuntimePtr& runtime,
+       const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+       const DriverOptions& options,
+       const V1_2::Model& model,
+       const sp<V1_2::IPreparedModelCallback>& cb,
+       bool float32ToFloat16)
 {
     ALOGV("ArmnnDriverImpl::prepareArmnnModel_1_2()");
 
     if (cb.get() == nullptr)
     {
         ALOGW("ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
-        return ErrorStatus::INVALID_ARGUMENT;
+        return V1_0::ErrorStatus::INVALID_ARGUMENT;
     }
 
     if (!runtime)
     {
-        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, "Device unavailable", cb);
     }
 
     if (!android::nn::validateModel(model))
     {
-        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::INVALID_ARGUMENT, "Invalid model passed as input", cb);
     }
 
     // Deliberately ignore any unsupported operations requested by the options -
@@ -120,8 +121,8 @@
 
     if (modelConverter.GetConversionResult() != ConversionResult::Success)
     {
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Optimize the network
@@ -142,8 +143,8 @@
     {
         std::stringstream message;
         message << "Exception (" << e.what() << ") caught from optimize.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Check that the optimized network is valid.
@@ -155,8 +156,8 @@
         {
             message << "\n" << msg;
         }
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Export the optimized network graph to a dot file if an output dump directory
@@ -170,15 +171,15 @@
     {
         if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
         {
-            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
+            return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be loaded", cb);
         }
     }
     catch (std::exception& e)
     {
         std::stringstream message;
         message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
-        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
-        return ErrorStatus::NONE;
+        FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, message.str(), cb);
+        return V1_0::ErrorStatus::NONE;
     }
 
     // Now that we have a networkId for the graph rename the dump file to use it
@@ -199,7 +200,7 @@
     // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
     if (!preparedModel->ExecuteWithDummyInputs())
     {
-        return FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
+        return FailPrepareModel(V1_0::ErrorStatus::GENERAL_FAILURE, "Network could not be executed", cb);
     }
 
     if (clTunedParameters &&
@@ -217,9 +218,9 @@
         }
     }
 
-    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());
+    NotifyCallbackAndCheck(cb, V1_0::ErrorStatus::NONE, preparedModel.release());
 
-    return ErrorStatus::NONE;
+    return V1_0::ErrorStatus::NONE;
 }
 
 Return<void> ArmnnDriverImpl::getCapabilities_1_2(const armnn::IRuntimePtr& runtime,
@@ -240,52 +241,56 @@
                 ParseSystemProperty(g_RelaxedFloat32toFloat16PerformancePowerUsage, defaultValue);
 
         // Set the base value for all operand types
+        #ifdef ARMNN_ANDROID_R
+        capabilities.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_2>({FLT_MAX, FLT_MAX});
+        #else
         capabilities.operandPerformance = nonExtensionOperandPerformance({FLT_MAX, FLT_MAX});
+        #endif
 
         // Load supported operand types
-        update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorFloat32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat32PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::FLOAT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::FLOAT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeFloat32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeFloat32PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_FLOAT16,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_FLOAT16,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorFloat16PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorFloat16PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::FLOAT16,
+        update(&capabilities.operandPerformance, V1_2::OperandType::FLOAT16,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeFloat16PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeFloat16PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_ASYMM,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_ASYMM,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8AsymmPerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_SYMM,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_SYMM,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT16_SYMM,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT16_SYMM,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorQuant16SymmPerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
                {
                    .execTime =
                    ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformanceExecTime, defaultValue),
@@ -293,19 +298,19 @@
                    ParseSystemProperty(g_OperandTypeTensorQuant8SymmPerChannelPerformancePowerUsage, defaultValue)
                });
 
-        update(&capabilities.operandPerformance, OperandType::TENSOR_INT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::TENSOR_INT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeTensorInt32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeTensorInt32PerformancePowerUsage, defaultValue)
                 });
 
-        update(&capabilities.operandPerformance, OperandType::INT32,
+        update(&capabilities.operandPerformance, V1_2::OperandType::INT32,
                 {
                     .execTime = ParseSystemProperty(g_OperandTypeInt32PerformanceExecTime, defaultValue),
                     .powerUsage = ParseSystemProperty(g_OperandTypeInt32PerformancePowerUsage, defaultValue)
                 });
 
-        cb(ErrorStatus::NONE, capabilities);
+        cb(V1_0::ErrorStatus::NONE, capabilities);
     }
     else
     {
@@ -313,13 +318,17 @@
         capabilities.relaxedFloat32toFloat16PerformanceTensor.execTime = 0;
 
         // Set the base value for all operand types
+        #ifdef ARMNN_ANDROID_R
+        capabilities.operandPerformance = nonExtensionOperandPerformance<HalVersion::V1_2>({0.f, 0.0f});
+        #else
         capabilities.operandPerformance = nonExtensionOperandPerformance({0.f, 0.0f});
+        #endif
 
-        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
+        cb(V1_0::ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
     }
 
     return Void();
 }
 
 } // namespace hal_1_2
-} // namespace armnn_driver
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/1.2/ArmnnDriverImpl.hpp b/1.2/ArmnnDriverImpl.hpp
index b3c6507..73ba133 100644
--- a/1.2/ArmnnDriverImpl.hpp
+++ b/1.2/ArmnnDriverImpl.hpp
@@ -11,6 +11,13 @@
 
 #include <armnn/ArmNN.hpp>
 
+#ifdef ARMNN_ANDROID_R
+using namespace android::nn::hal;
+#endif
+
+namespace V1_0 = ::android::hardware::neuralnetworks::V1_0;
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+
 namespace armnn_driver
 {
 namespace hal_1_2
@@ -19,12 +26,12 @@
 class ArmnnDriverImpl
 {
 public:
-    static Return<ErrorStatus> prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
-                                                     const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
-                                                     const DriverOptions& options,
-                                                     const V1_2::Model& model,
-                                                     const android::sp<V1_2::IPreparedModelCallback>& cb,
-                                                     bool float32ToFloat16 = false);
+    static Return<V1_0::ErrorStatus> prepareArmnnModel_1_2(const armnn::IRuntimePtr& runtime,
+                                                           const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+                                                           const DriverOptions& options,
+                                                           const V1_2::Model& model,
+                                                           const android::sp<V1_2::IPreparedModelCallback>& cb,
+                                                           bool float32ToFloat16 = false);
 
     static Return<void> getCapabilities_1_2(const armnn::IRuntimePtr& runtime,
                                             V1_2::IDevice::getCapabilities_1_2_cb cb);
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8e4ef8a..b3ccc47 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -26,9 +26,9 @@
 namespace
 {
 
-bool IsQSymmDequantizeForWeights(const Operation& operation, const Model& model)
+bool IsQSymmDequantizeForWeights(const HalPolicy::Operation& operation, const HalPolicy::Model& model)
 {
-    const Operand* operand = GetInputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    const HalPolicy::Operand* operand = GetInputOperand<hal_1_2::HalPolicy>(operation, 0, model);
     if (!operand)
     {
         return false;
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index e0a5c2f..cd4f2da 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -11,6 +11,8 @@
 
 #include <armnn/Types.hpp>
 
+namespace V1_2 = ::android::hardware::neuralnetworks::V1_2;
+
 namespace armnn_driver
 {
 namespace hal_1_2