IVGCVSW-3592 Add Support for Quantize to HAL 1.2 Driver
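
Adds a QUANTIZE case to the HAL 1.2 operation switch, implements
HalPolicy::ConvertQuantize(), and updates NnapiSupport.txt (which also
moves PAD_V2 into the supported operations list).

For context only, not part of the patch: QUANTIZE converts a FLOAT32
tensor to QUANT8_ASYMM using the output operand's scale and zero point.
A minimal sketch of the per-element affine quantization, assuming the
NNAPI QUANT8_ASYMM semantics (the backend performs this, not HalPolicy):

    // Illustrative sketch, not driver code: divide by scale, round,
    // shift by the zero point, then clamp to the unsigned 8-bit range.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t QuantizeElement(float value, float scale, int32_t zeroPoint)
    {
        const int32_t q =
            static_cast<int32_t>(std::round(value / scale)) + zeroPoint;
        return static_cast<uint8_t>(std::clamp(q, 0, 255));
    }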

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ie7421078b2bdd16d7ac67b635953b34721e8c8fe
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index 8502640..dee2175 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -146,6 +146,8 @@
             return ConvertPadV2(operation, model, data);
         case V1_2::OperationType::PRELU:
             return ConvertPrelu(operation, model, data);
+        case V1_2::OperationType::QUANTIZE:
+            return ConvertQuantize(operation, model, data);
         case V1_2::OperationType::RELU:
             return ConvertReLu(operation, model, data);
         case V1_2::OperationType::RELU1:
@@ -751,6 +753,47 @@
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
 
+bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
+
+    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsQuantizeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               outputInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
+}
+
 bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
diff --git a/1.2/HalPolicy.hpp b/1.2/HalPolicy.hpp
index 285a37f..a51b9a6 100644
--- a/1.2/HalPolicy.hpp
+++ b/1.2/HalPolicy.hpp
@@ -51,6 +51,8 @@
 
     static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index a57768d..47c5149 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -52,7 +52,9 @@
 DEPTHWISE_CONV_2D            (FLOAT32,QUANT8_ASYMM)
 MAXIMUM                      (FLOAT32,QUANT8_ASYMM)
 MINIMUM                      (FLOAT32,QUANT8_ASYMM)
+PAD_V2                       (FLOAT32,QUANT8_ASYMM)
 PRELU                        (FLOAT32,QUANT8_ASYMM)
+QUANTIZE                     (FLOAT32,QUANT8_ASYMM)
 RESIZE_NEAREST_NEIGHBOR      (FLOAT32,QUANT8_ASYMM)
 SOFTMAX                      (FLOAT32,QUANT8_ASYMM)
 
@@ -71,8 +73,6 @@
 
 CONCATENATION
 LSTM
-PAD_V2
-QUANTIZE
 QUANTIZED_16BIT_LSTM
 TRANSPOSE_CONV_2D
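
For reference, not part of the patch: the ConvertQuantize() path above,
together with SetupAndTrackLayerOutputSlot(), is roughly equivalent to
building the following standalone ArmNN network by hand. The tensor shape
and quantization parameters below are assumptions for illustration;
DataType::QuantisedAsymm8 is the ArmNN name for QUANT8_ASYMM in this era
of the API.

    #include <armnn/ArmNN.hpp>

    // Build a one-layer network: FLOAT32 input -> Quantize -> QUANT8_ASYMM output.
    armnn::INetworkPtr net = armnn::INetwork::Create();

    armnn::TensorInfo inputInfo(armnn::TensorShape({1, 4}),
                                armnn::DataType::Float32);
    armnn::TensorInfo outputInfo(armnn::TensorShape({1, 4}),
                                 armnn::DataType::QuantisedAsymm8,
                                 0.05f,  // assumed scale
                                 128);   // assumed zero point

    armnn::IConnectableLayer* input    = net->AddInputLayer(0);
    armnn::IConnectableLayer* quantize = net->AddQuantizeLayer();
    armnn::IConnectableLayer* output   = net->AddOutputLayer(0);

    // Wire input -> quantize -> output, as ConvertQuantize() and
    // SetupAndTrackLayerOutputSlot() do for the HAL operands.
    input->GetOutputSlot(0).Connect(quantize->GetInputSlot(0));
    quantize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(inputInfo);
    quantize->GetOutputSlot(0).SetTensorInfo(outputInfo);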