IVGCVSW-4473 Android R pre Hal 1_3 build changes

* Update ErrorStatus to V1_0::ErrorStatus
* Update Request to V1_0::Request
* Update OperandType to V1_2::OperandType
* Add namespace android::nn::hal in ArmnnDriverImpl for R only
* Add missing g_RelaxedFloat32toFloat16PerformancePowerUsageName
* Add namespace V1_0 or V1_1 where necessary
* Update Android.mk with R macro and android.hardware.neuralnetworks@1.3
* Remove androidnn.go
* include IAllocator in DriverTestHelpers
* Remove unused LOCAL_CFLAGS

Change-Id: I1787f1ed6784b3bbec017536d87d49197405e853
Signed-off-by: Kevin May <kevin.may@arm.com>
diff --git a/test/Concat.cpp b/test/Concat.cpp
index 9beb67b..b99e31c 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -35,8 +35,8 @@
                 int32_t concatAxis,
                 const TestTensor & expectedOutputTensor,
                 armnn::Compute computeDevice,
-                ErrorStatus expectedPrepareStatus=ErrorStatus::NONE,
-                ErrorStatus expectedExecStatus=ErrorStatus::NONE)
+                V1_0::ErrorStatus expectedPrepareStatus=V1_0::ErrorStatus::NONE,
+                V1_0::ErrorStatus expectedExecStatus=V1_0::ErrorStatus::NONE)
 {
     std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
     HalPolicy::Model model{};
@@ -59,13 +59,13 @@
     model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
 
     // make the prepared model
-    ErrorStatus prepareStatus=ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus=V1_0::ErrorStatus::NONE;
     android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
                                                                              *driver,
                                                                              prepareStatus,
                                                                              expectedPrepareStatus);
     BOOST_TEST(prepareStatus == expectedPrepareStatus);
-    if (prepareStatus != ErrorStatus::NONE)
+    if (prepareStatus != V1_0::ErrorStatus::NONE)
     {
         // prepare failed, we cannot continue
         return;
@@ -111,7 +111,7 @@
     }
 
     // make the request based on the arguments
-    Request request = {};
+    V1_0::Request request = {};
     request.inputs  = inputArguments;
     request.outputs = outputArguments;
 
@@ -131,7 +131,7 @@
     auto execStatus = Execute(preparedModel, request, expectedExecStatus);
     BOOST_TEST(execStatus == expectedExecStatus);
 
-    if (execStatus == ErrorStatus::NONE)
+    if (execStatus == V1_0::ErrorStatus::NONE)
     {
         // check the result if there was no error
         const float * expectedOutput = expectedOutputTensor.GetData();
@@ -310,7 +310,7 @@
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
 }
 
@@ -323,7 +323,7 @@
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
 }
 
@@ -333,7 +333,7 @@
     TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
 
     // We need at least two tensors to concatenate
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn}, axis, aIn, sample, expectedParserStatus);
 }
 
@@ -350,7 +350,7 @@
                                                      2, 3, 7, 8, 9, 11}};
 
     // The input dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, sample, expectedParserStatus);
 }
 
@@ -362,7 +362,7 @@
     TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
 
     // The input dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn}, axis, expected, sample, expectedParserStatus);
 }
 
@@ -380,7 +380,7 @@
                                                        2, 3, 7, 8, 9, 11}};
 
     // The input and output dimensions must be compatible
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
 }
 
@@ -398,7 +398,7 @@
                                                    2, 3, 7, 8, 9, 11}};
 
     // The input and output ranks must match
-    ErrorStatus expectedParserStatus = ErrorStatus::GENERAL_FAILURE;
+    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
     ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
 }