IVGCVSW-4453 Add Support for ANEURALNETWORKS_QLSTM to HAL 1.3 Driver

 * Add QLSTM support to the Android NN Driver
 * Add an overrideOutputInfo parameter to SetupAndTrackLayerOutputSlot (see the sketch below)
 * Add an optional condition to GetInputScalar
 * Refactor the Quantized 16-bit LSTM implementation (rename ConvertQuantizedLstm to ConvertQuantized16BitLstm)
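
The second bullet is the one that changes the shared conversion helpers. As a
rough sketch only (the helper name, parameter list and surrounding driver
plumbing below are simplified assumptions, not the actual
SetupAndTrackLayerOutputSlot code), the idea is an optional pointer that, when
supplied, replaces the tensor info otherwise derived from the model operand:

    // Hedged sketch: illustrates the overrideOutputInfo idea only.
    #include <armnn/INetwork.hpp>
    #include <armnn/Tensor.hpp>

    void SetOutputSlotInfo(armnn::IConnectableLayer& layer,
                           unsigned int layerOutputIndex,
                           const armnn::TensorInfo& operandInfo,
                           const armnn::TensorInfo* overrideOutputInfo = nullptr)
    {
        armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

        // QLSTM outputs may need quantization info that differs from what the
        // model operand describes, so callers can pass an explicit override;
        // otherwise the operand-derived info is used as before.
        outputSlot.SetTensorInfo(overrideOutputInfo ? *overrideOutputInfo : operandInfo);
    }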

Change-Id: Ie8fa98ad5ee4a62174ef91ca80f1df62b7fde937
Signed-off-by: Keith Davis <keith.davis@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 9e547fa..d55e587 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -100,7 +100,7 @@
         case V1_2::OperationType::QUANTIZE:
             return ConvertQuantize(operation, model, data);
         case V1_2::OperationType::QUANTIZED_16BIT_LSTM:
-            return ConvertQuantizedLstm(operation, model, data);
+            return ConvertQuantized16BitLstm(operation, model, data);
         case V1_2::OperationType::RELU:
             return ConvertReLu(operation, model, data);
         case V1_2::OperationType::RELU1:
@@ -338,10 +338,10 @@
     return ::ConvertQuantize<hal_1_2::HalPolicy>(operation, model, data);
 }
 
-bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
+bool HalPolicy::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
 {
-    ALOGV("hal_1_2::HalPolicy::ConvertQuantizedLstm()");
-    return ::ConvertQuantizedLstm<hal_1_2::HalPolicy>(operation, model, data);
+    ALOGV("hal_1_2::HalPolicy::ConvertQuantized16BitLstm()");
+    return ::ConvertQuantized16BitLstm<hal_1_2::HalPolicy>(operation, model, data);
 }
 
 bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)