IVGCVSW-2911 Android Q compatibility code updates

 * Updates to test classes.

Change-Id: I52e11a1d97d02c23e0a33e4e128dc43a4a95b5bc
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 55a1a39..0848a88 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -45,7 +45,7 @@
 
     // make the prepared models
     const size_t maxRequests = 5;
-    android::sp<IPreparedModel> preparedModels[maxRequests];
+    android::sp<V1_0::IPreparedModel> preparedModels[maxRequests];
     for (size_t i = 0; i < maxRequests; ++i)
     {
         preparedModels[i] = PrepareModel(model, *driver);
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index ff417d9..3fda29b 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -59,7 +59,7 @@
 
     // make the prepared model
     SetModelFp16Flag(model, fp16Enabled);
-    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
 
     // construct the request
     DataLocation inloc    = {};
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 3bc0a20..7a6b780 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -54,7 +54,7 @@
 }
 
 Return<void> PreparedModelCallback::notify(ErrorStatus status,
-                                           const android::sp<IPreparedModel>& preparedModel)
+                                           const android::sp<V1_0::IPreparedModel>& preparedModel)
 {
     m_ErrorStatus = status;
     m_PreparedModel = preparedModel;
@@ -109,10 +109,10 @@
     memcpy(dst, data, size * sizeof(float));
 }
 
-android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
-                                                   armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus& prepareStatus,
-                                                   ErrorStatus expectedStatus)
+android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
+                                                         armnn_driver::ArmnnDriver& driver,
+                                                         ErrorStatus& prepareStatus,
+                                                         ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel(model, cb);
@@ -128,10 +128,10 @@
 
 #ifdef ARMNN_ANDROID_NN_V1_1
 
-android::sp<IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
-                                                   armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus& prepareStatus,
-                                                   ErrorStatus expectedStatus)
+android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
+                                                         armnn_driver::ArmnnDriver& driver,
+                                                         ErrorStatus& prepareStatus,
+                                                         ErrorStatus expectedStatus)
 {
     android::sp<PreparedModelCallback> cb(new PreparedModelCallback());
     driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
@@ -147,7 +147,7 @@
 
 #endif
 
-ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
+ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
                     const Request& request,
                     ErrorStatus expectedStatus)
 {
@@ -160,7 +160,7 @@
     return execStatus;
 }
 
-android::sp<ExecutionCallback> ExecuteNoWait(android::sp<IPreparedModel> preparedModel, const Request& request)
+android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel, const Request& request)
 {
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
     BOOST_TEST(preparedModel->execute(request, cb) == ErrorStatus::NONE);
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index 394720e..4a8f607 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -33,7 +33,7 @@
 
 std::ostream& operator<<(std::ostream& os, V1_0::ErrorStatus stat);
 
-struct ExecutionCallback : public IExecutionCallback
+struct ExecutionCallback : public V1_0::IExecutionCallback
 {
     ExecutionCallback() : mNotified(false) {}
     Return<void> notify(ErrorStatus status) override;
@@ -48,7 +48,7 @@
     bool mNotified;
 };
 
-class PreparedModelCallback : public IPreparedModelCallback
+class PreparedModelCallback : public V1_0::IPreparedModelCallback
 {
 public:
     PreparedModelCallback()
@@ -58,13 +58,13 @@
     ~PreparedModelCallback() override { }
 
     Return<void> notify(ErrorStatus status,
-                        const android::sp<IPreparedModel>& preparedModel) override;
+                        const android::sp<V1_0::IPreparedModel>& preparedModel) override;
     ErrorStatus GetErrorStatus() { return m_ErrorStatus; }
-    android::sp<IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
+    android::sp<V1_0::IPreparedModel> GetPreparedModel() { return m_PreparedModel; }
 
 private:
     ErrorStatus                  m_ErrorStatus;
-    android::sp<IPreparedModel>  m_PreparedModel;
+    android::sp<V1_0::IPreparedModel>  m_PreparedModel;
 };
 
 hidl_memory allocateSharedMemory(int64_t size);
@@ -74,7 +74,7 @@
 void AddPoolAndSetData(uint32_t size, Request& request, const float* data);
 
 template<typename HalModel>
-void AddOperand(HalModel& model, const Operand& op)
+void AddOperand(HalModel& model, const V1_0::Operand& op)
 {
     model.operands.resize(model.operands.size() + 1);
     model.operands[model.operands.size() - 1] = op;
@@ -87,11 +87,11 @@
     location.offset = model.operandValues.size();
     location.length = sizeof(int32_t);
 
-    Operand op    = {};
-    op.type       = OperandType::INT32;
-    op.dimensions = hidl_vec<uint32_t>{};
-    op.lifetime   = OperandLifeTime::CONSTANT_COPY;
-    op.location   = location;
+    V1_0::Operand op    = {};
+    op.type             = V1_0::OperandType::INT32;
+    op.dimensions       = hidl_vec<uint32_t>{};
+    op.lifetime         = V1_0::OperandLifeTime::CONSTANT_COPY;
+    op.location         = location;
 
     model.operandValues.resize(model.operandValues.size() + location.length);
     *reinterpret_cast<int32_t*>(&model.operandValues[location.offset]) = value;
@@ -112,8 +112,8 @@
 void AddTensorOperand(HalModel& model,
                       const hidl_vec<uint32_t>& dimensions,
                       const T* values,
-                      OperandType operandType = OperandType::TENSOR_FLOAT32,
-                      OperandLifeTime operandLifeTime = OperandLifeTime::CONSTANT_COPY)
+                      V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32,
+                      V1_0::OperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY)
 {
     uint32_t totalElements = 1;
     for (uint32_t dim : dimensions)
@@ -124,16 +124,16 @@
     DataLocation location = {};
     location.length = totalElements * sizeof(T);
 
-    if(operandLifeTime == OperandLifeTime::CONSTANT_COPY)
+    if(operandLifeTime == V1_0::OperandLifeTime::CONSTANT_COPY)
     {
         location.offset = model.operandValues.size();
     }
 
-    Operand op    = {};
-    op.type       = operandType;
-    op.dimensions = dimensions;
-    op.lifetime   = OperandLifeTime::CONSTANT_COPY;
-    op.location   = location;
+    V1_0::Operand op    = {};
+    op.type             = operandType;
+    op.dimensions       = dimensions;
+    op.lifetime         = V1_0::OperandLifeTime::CONSTANT_COPY;
+    op.location         = location;
 
     model.operandValues.resize(model.operandValues.size() + location.length);
     for (uint32_t i = 0; i < totalElements; i++)
@@ -148,8 +148,8 @@
 void AddTensorOperand(HalModel& model,
                       const hidl_vec<uint32_t>& dimensions,
                       const std::vector<T>& values,
-                      OperandType operandType = OperandType::TENSOR_FLOAT32,
-                      OperandLifeTime operandLifeTime = OperandLifeTime::CONSTANT_COPY)
+                      V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32,
+                      V1_0::OperandLifeTime operandLifeTime = V1_0::OperandLifeTime::CONSTANT_COPY)
 {
     AddTensorOperand<HalModel, T>(model, dimensions, values.data(), operandType, operandLifeTime);
 }
@@ -157,13 +157,13 @@
 template<typename HalModel>
 void AddInputOperand(HalModel& model,
                      const hidl_vec<uint32_t>& dimensions,
-                     OperandType operandType = OperandType::TENSOR_FLOAT32)
+                     V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32)
 {
-    Operand op    = {};
-    op.type       = operandType;
-    op.scale      = operandType == OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
-    op.dimensions = dimensions;
-    op.lifetime   = OperandLifeTime::MODEL_INPUT;
+    V1_0::Operand op    = {};
+    op.type             = operandType;
+    op.scale            = operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
+    op.dimensions       = dimensions;
+    op.lifetime         = V1_0::OperandLifeTime::MODEL_INPUT;
 
     AddOperand<HalModel>(model, op);
 
@@ -174,13 +174,13 @@
 template<typename HalModel>
 void AddOutputOperand(HalModel& model,
                       const hidl_vec<uint32_t>& dimensions,
-                      OperandType operandType = OperandType::TENSOR_FLOAT32)
+                      V1_0::OperandType operandType = V1_0::OperandType::TENSOR_FLOAT32)
 {
-    Operand op    = {};
-    op.type       = operandType;
-    op.scale      = operandType == OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
-    op.dimensions = dimensions;
-    op.lifetime   = OperandLifeTime::MODEL_OUTPUT;
+    V1_0::Operand op    = {};
+    op.type             = operandType;
+    op.scale            = operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM ? 1.f / 255.f : 0.f;
+    op.dimensions       = dimensions;
+    op.lifetime         = V1_0::OperandLifeTime::MODEL_OUTPUT;
 
     AddOperand<HalModel>(model, op);
 
@@ -188,14 +188,14 @@
     model.outputIndexes[model.outputIndexes.size() - 1] = model.operands.size() - 1;
 }
 
-android::sp<IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
-                                                   armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus& prepareStatus,
-                                                   ErrorStatus expectedStatus = ErrorStatus::NONE);
+android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_0::Model& model,
+                                                         armnn_driver::ArmnnDriver& driver,
+                                                         ErrorStatus& prepareStatus,
+                                                         ErrorStatus expectedStatus = ErrorStatus::NONE);
 
 #ifdef ARMNN_ANDROID_NN_V1_1
 
-android::sp<IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
-                                                   armnn_driver::ArmnnDriver& driver,
-                                                   ErrorStatus& prepareStatus,
-                                                   ErrorStatus expectedStatus = ErrorStatus::NONE);
+android::sp<V1_0::IPreparedModel> PrepareModelWithStatus(const V1_1::Model& model,
+                                                         armnn_driver::ArmnnDriver& driver,
+                                                         ErrorStatus& prepareStatus,
+                                                         ErrorStatus expectedStatus = ErrorStatus::NONE);
@@ -203,18 +203,18 @@
 #endif
 
 template<typename HalModel>
-android::sp<IPreparedModel> PrepareModel(const HalModel& model,
-                                         armnn_driver::ArmnnDriver& driver)
+android::sp<V1_0::IPreparedModel> PrepareModel(const HalModel& model,
+                                               armnn_driver::ArmnnDriver& driver)
 {
     ErrorStatus prepareStatus = ErrorStatus::NONE;
     return PrepareModelWithStatus(model, driver, prepareStatus);
 }
 
-ErrorStatus Execute(android::sp<IPreparedModel> preparedModel,
+ErrorStatus Execute(android::sp<V1_0::IPreparedModel> preparedModel,
                     const Request& request,
                     ErrorStatus expectedStatus = ErrorStatus::NONE);
 
-android::sp<ExecutionCallback> ExecuteNoWait(android::sp<IPreparedModel> preparedModel,
+android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
                                              const Request& request);
 
 } // namespace driverTestHelpers
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index 5c204ca..6ab63ff 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -39,7 +39,7 @@
     model.operations[0].outputs = hidl_vec<uint32_t>{4};
 
     // make the prepared model
-    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
 
     // construct the request
     DataLocation inloc = {};
@@ -118,7 +118,7 @@
     model.operations[0].outputs = hidl_vec<uint32_t>{4};
 
     // make the prepared model
-    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
 
 
     // construct the request
@@ -205,7 +205,7 @@
     model.operations[0].outputs = hidl_vec<uint32_t>{4};
 
     // make the prepared model
-    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
 
 
     // construct the request
diff --git a/test/Lstm.cpp b/test/Lstm.cpp
index 66f2cf0..b1b7c9d 100644
--- a/test/Lstm.cpp
+++ b/test/Lstm.cpp
@@ -137,7 +137,7 @@
 
     // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
-    AddTensorOperand(model, inputToInputWeightsDimensions, inputToInputWeightsValue, OperandType::TENSOR_FLOAT32,
+    AddTensorOperand(model, inputToInputWeightsDimensions, inputToInputWeightsValue, V1_0::OperandType::TENSOR_FLOAT32,
                      CreateNoValueLifeTime(inputToInputWeightsDimensions));
     // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     //     [num_units, input_size].
@@ -151,7 +151,7 @@
     //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
     //     “num_units”), or the second dimension of the “projection_weights”, if defined.
     AddTensorOperand(model, recurrentToInputWeightsDimensions, recurrentToInputWeightsValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(recurrentToInputWeightsDimensions));
     // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     //     [num_units, output_size].
     AddTensorOperand(model, recurrentToForgetWeightsDimensions, recurrentToForgetWeightsValue);
@@ -163,16 +163,16 @@
     AddTensorOperand(model, recurrentToOutputWeightsDimensions, recurrentToOutputWeightsValue);
     // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
     AddTensorOperand(model, cellToInputWeightsDimensions, cellToInputWeightsValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToInputWeightsDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToInputWeightsDimensions));
     // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
     AddTensorOperand(model, cellToForgetWeightsDimensions, cellToForgetWeightsValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToForgetWeightsDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToForgetWeightsDimensions));
     // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
     AddTensorOperand(model, cellToOutputWeightsDimensions, cellToOutputWeightsValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToOutputWeightsDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(cellToOutputWeightsDimensions));
     // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
     AddTensorOperand(model, inputGateBiasDimensions, inputGateBiasValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(inputGateBiasDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(inputGateBiasDimensions));
     // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
     AddTensorOperand(model, forgetGateBiasDimensions, forgetGateBiasValue);
     // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
@@ -182,10 +182,10 @@
     // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
     //     [output_size, num_units].
     AddTensorOperand(model, projectionWeightsDimensions, projectionWeightsValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionWeightsDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionWeightsDimensions));
     // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
     AddTensorOperand(model, projectionBiasDimensions, projectionBiasValue,
-                     OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionBiasDimensions));
+                     V1_0::OperandType::TENSOR_FLOAT32, CreateNoValueLifeTime(projectionBiasDimensions));
 
     // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
     AddInputOperand(model, outputStateInDimensions);
@@ -196,15 +196,15 @@
     // 20: The activation function: A value indicating the activation function:
     //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
     AddTensorOperand(model, activationFunctionDimensions,
-                     activationFunctionValue, OperandType::INT32);
+                     activationFunctionValue, V1_0::OperandType::INT32);
     // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
     //     If set to 0.0 then clipping is disabled.
     AddTensorOperand(model, cellClippingThresholdDimensions,
-                     cellClippingThresholdValue, OperandType::FLOAT32);
+                     cellClippingThresholdValue, V1_0::OperandType::FLOAT32);
     // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
     //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
     AddTensorOperand(model, projectionClippingThresholdDimensions,
-                     projectionClippingThresholdValue, OperandType::FLOAT32);
+                     projectionClippingThresholdValue, V1_0::OperandType::FLOAT32);
 
     // Outputs:
     //  0: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
@@ -261,7 +261,7 @@
     float* outputData = static_cast<float*>(static_cast<void*>(outputMemory->getPointer()));
 
     // make the prepared model and run the execution
-    android::sp<IPreparedModel> preparedModel = PrepareModel(model, *driver);
+    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModel(model, *driver);
     if (preparedModel.get() != nullptr)
     {
         Execute(preparedModel, request);
diff --git a/test/Merger.cpp b/test/Merger.cpp
index 118e0d6..16ac451 100644
--- a/test/Merger.cpp
+++ b/test/Merger.cpp
@@ -51,7 +51,7 @@
 
     // make the prepared model
     ErrorStatus prepareStatus=ErrorStatus::NONE;
-    android::sp<IPreparedModel> preparedModel = PrepareModelWithStatus(model,
-                                                                       *driver,
-                                                                       prepareStatus,
-                                                                       expectedPrepareStatus);
+    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
+                                                                             *driver,
+                                                                             prepareStatus,
+                                                                             expectedPrepareStatus);