IVGCVSW-3181 Add HAL 1.2 support to android-nn-driver

 * Updated Android.mk to build HAL 1.2 driver
 * Added 1.2 HalPolicy and ArmnnDriver
 * Added 1.2 ArmnnPreparedModel
 * Updated converters and utilities to accept new HAL 1.2 operands and operand types.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I62856deab24e106f72cccce09468db4971756fa6
diff --git a/Utils.cpp b/Utils.cpp
index f5599f7..c3c6310 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -63,7 +63,7 @@
 
     // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
     // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
-#if defined(ARMNN_ANDROID_P) // Use the new Android P implementation.
+#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) // Use the new Android implementation.
     uint8_t* memPoolBuffer = memPool.getBuffer();
 #else // Fallback to the old Android O implementation.
     uint8_t* memPoolBuffer = memPool.buffer;
@@ -90,7 +90,7 @@
             type = armnn::DataType::Signed32;
             break;
         default:
-            throw UnsupportedOperand(operand.type);
+            throw UnsupportedOperand<V1_0::OperandType>(operand.type);
     }
 
     armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
@@ -101,12 +101,56 @@
     return ret;
 }
 
+#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+
+armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
+{
+    armnn::DataType type;
+
+    switch (operand.type)
+    {
+        case V1_2::OperandType::TENSOR_FLOAT32:
+            type = armnn::DataType::Float32;
+            break;
+        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
+            type = armnn::DataType::QuantisedAsymm8;
+            break;
+        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
+            type = armnn::DataType::QuantisedSymm16;
+            break;
+        case V1_2::OperandType::TENSOR_INT32:
+            type = armnn::DataType::Signed32;
+            break;
+        default:
+            throw UnsupportedOperand<V1_2::OperandType>(operand.type);
+    }
+
+    armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
+
+    ret.SetQuantizationScale(operand.scale);
+    ret.SetQuantizationOffset(operand.zeroPoint);
+
+    return ret;
+}
+
+#endif
+
 std::string GetOperandSummary(const V1_0::Operand& operand)
 {
     return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
         toString(operand.type);
 }
 
+#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+
+std::string GetOperandSummary(const V1_2::Operand& operand)
+{
+    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
+        toString(operand.type);
+}
+
+#endif
+
 using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
     unsigned int elementIndex,
     std::ofstream& fileStream);