IVGCVSW-4447 Add HAL 1.3 Support

* Add new 1.3 files HalPolicy, ArmnnDriver, ArmnnDriverImpl
* Add new .rc file for 1.3 service
* Add ArmnnPreparedModel_1_3 and implement new functions
* Update Android.mk with 1.3 driver and service
* Refactor ifdef to include ARMNN_ANDROID_NN_V1_3
* Create Utils getMainModel helper for the new 1.3 Model main subgraph (see the sketch below)
* Use Android Utils to convertToV1_X in ArmnnPreparedModel_1_3
* Refactor HAL 1.2 convert functions into ConversionUtils_1_2.hpp
* Replace ArmnnBurstExecutorWithCache with call to ExecutionBurstServer

Signed-off-by: Kevin May <kevin.may@arm.com>
Change-Id: I514069e9e1b16bcd1c4abfb5d563d25ac22d02e3
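
For reference, a minimal sketch of the getMainModel helper described above,
assuming the HAL 1.3 Model layout where the entry-point subgraph is stored in
the model's main member (illustrative, not copied from this patch):

    #ifdef ARMNN_ANDROID_NN_V1_3
    // HAL 1.3 models can carry several subgraphs; the main subgraph is
    // the execution entry point, so callers fetch it through one helper.
    inline const V1_3::Subgraph& getMainModel(const V1_3::Model& model)
    {
        return model.main;
    }
    #endif
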
diff --git a/Utils.cpp b/Utils.cpp
index c548f84..8a17b53 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -103,7 +103,7 @@
     return ret;
 }
 
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
 
 armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
 {
@@ -164,13 +164,74 @@
 
 #endif
 
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+
+armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
+{
+    using namespace armnn;
+    bool perChannel = false;
+
+    DataType type;
+    switch (operand.type)
+    {
+        case V1_3::OperandType::TENSOR_FLOAT32:
+            type = armnn::DataType::Float32;
+            break;
+        case V1_3::OperandType::TENSOR_FLOAT16:
+            type = armnn::DataType::Float16;
+            break;
+        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
+            type = armnn::DataType::QAsymmU8;
+            break;
+        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+            perChannel = true;
+            ARMNN_FALLTHROUGH;
+        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
+            type = armnn::DataType::QSymmS8;
+            break;
+        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
+            type = armnn::DataType::QSymmS16;
+            break;
+        case V1_3::OperandType::TENSOR_INT32:
+            type = armnn::DataType::Signed32;
+            break;
+        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+            type = armnn::DataType::QAsymmS8;
+            break;
+        default:
+            throw UnsupportedOperand<V1_3::OperandType>(operand.type);
+    }
+
+    TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
+    if (perChannel)
+    {
+        // ExtraParams is expected to be of type channelQuant
+        BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
+                     V1_3::Operand::ExtraParams::hidl_discriminator::channelQuant);
+
+        auto perAxisQuantParams = operand.extraParams.channelQuant();
+
+        ret.SetQuantizationScales(perAxisQuantParams.scales);
+        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
+    }
+    else
+    {
+        ret.SetQuantizationScale(operand.scale);
+        ret.SetQuantizationOffset(operand.zeroPoint);
+    }
+
+    return ret;
+}
+
+#endif
+
 std::string GetOperandSummary(const V1_0::Operand& operand)
 {
     return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
         toString(operand.type);
 }
 
-#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
+#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
 
 std::string GetOperandSummary(const V1_2::Operand& operand)
 {
@@ -180,6 +241,16 @@
 
 #endif
 
+#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
+
+std::string GetOperandSummary(const V1_3::Operand& operand)
+{
+    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
+           toString(operand.type);
+}
+
+#endif
+
 using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
     unsigned int elementIndex,
     std::ofstream& fileStream);
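
A hedged usage sketch of the per-channel branch in GetTensorInfoForOperand
above. The operand values are invented for illustration, and
SymmPerChannelQuantParams is assumed to be the V1_2 HIDL struct that the 1.3
Operand's extraParams union reuses:

    // (Assumes the HAL 1.3 headers and this patch's Utils are included.)
    // One scale per channel along dimension 0, instead of a single scale.
    V1_3::Operand operand;
    operand.type       = V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL;
    operand.dimensions = android::hardware::hidl_vec<uint32_t>({4, 2});

    V1_2::SymmPerChannelQuantParams quantParams;
    quantParams.scales     = android::hardware::hidl_vec<float>({0.5f, 0.25f, 0.125f, 0.0625f});
    quantParams.channelDim = 0;
    operand.extraParams.channelQuant(quantParams);

    // Produces a TensorInfo with four per-axis scales on quantization dim 0.
    armnn::TensorInfo info = GetTensorInfoForOperand(operand);
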
@@ -449,6 +520,27 @@
     }
 }
 
+void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
+{
+    if (memPools.empty())
+    {
+        return;
+    }
+    // Commit output buffers.
+    // Note that we update *all* pools, even if they aren't actually used as outputs -
+    // this is simpler and is what the CpuExecutor does.
+    for (auto& pool : memPools)
+    {
+        // The android::nn::RunTimePoolInfo type changed between Android P/Q and Android R:
+        // update() was removed and flush() was added.
+#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
+        pool.flush();
+#else
+        pool.update();
+#endif
+    }
+}
+
 
 
 } // namespace armnn_driver
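
For context, a simplified sketch of how an execution path might call the new
CommitPools helper; the surrounding flow is an assumption based on the standard
NNAPI CpuExecutor utilities, not part of this patch:

    // Map the request's memory pools, run the network so the output
    // tensors land in the mapped buffers, then commit every pool back
    // to shared memory (flush() on Android R, update() on P/Q).
    std::vector<android::nn::RunTimePoolInfo> memPools;
    // ... populate memPools from the request's hidl_memory pools ...
    // ... execute the network, writing outputs into the pools ...
    armnn_driver::CommitPools(memPools);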