IVGCVSW-4407 HAL 1.3 Operator Support

* IVGCVSW-4441 Add Support for ANEURALNETWORKS_ELU
* IVGCVSW-4443 Add Support for ANEURALNETWORKS_HARD_SWISH
* IVGCVSW-4448 Add TENSOR_QUANT8_ASYMM_SIGNED data type support

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Idb9bb3f463b956221711423c15b6557eeb1af7db
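
Note for reviewers: the two new operations are driven entirely through the standard NNAPI C API, so no driver-specific client code is needed. The sketch below is not part of this patch; it builds a minimal model containing a single HARD_SWISH operation on a FLOAT32 tensor, with the function name, tensor shape and omission of error handling being illustrative only. ELU is wired up the same way, with an additional FLOAT32 alpha scalar as operand input 1.

#include <android/NeuralNetworks.h>

// Illustrative sketch: a one-operation model using the newly supported
// HARD_SWISH operation. Shapes are arbitrary and error codes are ignored.
void BuildHardSwishModel()
{
    ANeuralNetworksModel* model = nullptr;
    ANeuralNetworksModel_create(&model);

    uint32_t dims[] = { 1, 2, 2, 1 };
    ANeuralNetworksOperandType tensorType = {};
    tensorType.type           = ANEURALNETWORKS_TENSOR_FLOAT32;
    tensorType.dimensionCount = 4;
    tensorType.dimensions     = dims;

    ANeuralNetworksModel_addOperand(model, &tensorType); // operand 0: input
    ANeuralNetworksModel_addOperand(model, &tensorType); // operand 1: output

    uint32_t inputs[]  = { 0 };
    uint32_t outputs[] = { 1 };
    ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_HARD_SWISH,
                                      1, inputs, 1, outputs);

    ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, inputs, 1, outputs);
    ANeuralNetworksModel_finish(model);
    ANeuralNetworksModel_free(model);
}
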
diff --git a/1.3/HalPolicy.cpp b/1.3/HalPolicy.cpp
index 0de7573..28d7319 100644
--- a/1.3/HalPolicy.cpp
+++ b/1.3/HalPolicy.cpp
@@ -45,6 +45,8 @@
             return ConvertDequantize(operation, model, data);
         case V1_3::OperationType::DIV:
             return ConvertDiv(operation, model, data);
+        case V1_3::OperationType::ELU:
+            return ConvertElu(operation, model, data);
         case V1_3::OperationType::EQUAL:
             return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
         case V1_3::OperationType::EXPAND_DIMS:
@@ -59,6 +61,8 @@
             return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
         case V1_3::OperationType::GROUPED_CONV_2D:
             return ConvertGroupedConv2d(operation, model, data);
+        case V1_3::OperationType::HARD_SWISH:
+            return ConvertHardSwish(operation, model, data);
         case V1_3::OperationType::INSTANCE_NORMALIZATION:
             return ConvertInstanceNormalization(operation, model, data);
         case V1_3::OperationType::L2_NORMALIZATION:
@@ -223,6 +227,15 @@
     return ::ConvertElementwiseUnary<hal_1_3::HalPolicy>(operation, model, data, unaryOperation);
 }
 
+bool HalPolicy::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_3::HalPolicy::ConvertElu()");
+    ActivationDescriptor desc;
+    desc.m_Function = ActivationFunction::Elu;
+
+    return ::ConvertToActivation<hal_1_3::HalPolicy>(operation, __func__, desc, model, data);
+}
+
 bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertExpandDims()");
@@ -247,6 +260,15 @@
     return ::ConvertGroupedConv2d<hal_1_3::HalPolicy>(operation, model, data);
 }
 
+bool HalPolicy::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
+{
+    ALOGV("hal_1_3::HalPolicy::ConvertHardSwish()");
+    ActivationDescriptor desc;
+    desc.m_Function = ActivationFunction::HardSwish;
+
+    return ::ConvertToActivation<hal_1_3::HalPolicy>(operation, __func__, desc, model, data);
+}
+
 bool HalPolicy::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
 {
     ALOGV("hal_1_3::HalPolicy::ConvertInstanceNormalization()");
diff --git a/1.3/HalPolicy.hpp b/1.3/HalPolicy.hpp
index f7771a6..e3f21b1 100644
--- a/1.3/HalPolicy.hpp
+++ b/1.3/HalPolicy.hpp
@@ -68,6 +68,8 @@
                                         ConversionData& data,
                                         armnn::UnaryOperation unaryOperation);
 
+    static bool ConvertElu(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
@@ -76,6 +78,8 @@
 
     static bool ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data);
 
+    static bool ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data);
+
     static bool ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data);
 
     static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data);
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 315089c..8067e53 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -207,9 +207,11 @@
 inline bool IsOperandTypeSupportedForTensors(V1_3::OperandType type)
 {
     return type == V1_3::OperandType::BOOL                           ||
+           type == V1_3::OperandType::TENSOR_BOOL8                   ||
            type == V1_3::OperandType::TENSOR_FLOAT16                 ||
            type == V1_3::OperandType::TENSOR_FLOAT32                 ||
            type == V1_3::OperandType::TENSOR_QUANT8_ASYMM            ||
+           type == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED     ||
            type == V1_3::OperandType::TENSOR_QUANT8_SYMM             ||
            type == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
            type == V1_3::OperandType::TENSOR_QUANT16_SYMM            ||
diff --git a/NnapiSupport.txt b/NnapiSupport.txt
index 84e643a..d5e077b 100644
--- a/NnapiSupport.txt
+++ b/NnapiSupport.txt
@@ -25,6 +25,7 @@
 DEPTHWISE_CONV_2D            (FLOAT32, QUANT8_ASYMM)
 DEQUANTIZE                   (FLOAT32 (output only), QUANT8_ASYMM (input only))
 DIV                          (FLOAT32, QUANT8_ASYMM)
+ELU                          (FLOAT32, QUANT8_ASYMM)
 EQUAL                        (FLOAT32, QUANT8_ASYMM)
 EXPAND_DIMS                  (FLOAT32, FLOAT16, QUANT8_ASYMM)
 FLOOR                        (FLOAT32)
@@ -32,6 +33,7 @@
 GREATER                      (FLOAT32, QUANT8_ASYMM)
 GREATER_EQUAL                (FLOAT32, QUANT8_ASYMM)
 GROUPED_CONV_2D              (FLOAT32, QUANT8_ASYMM)
+HARD_SWISH                   (FLOAT32, QUANT8_ASYMM)
 INSTANCE_NORMALIZATION       (FLOAT32)
 L2_NORMALIZATION             (FLOAT32)
 L2_POOL_2D                   (FLOAT32, QUANT8_ASYMM)
diff --git a/Utils.cpp b/Utils.cpp
index 8a17b53..aeee800 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -174,6 +174,9 @@
     DataType type;
     switch (operand.type)
     {
+        case V1_3::OperandType::TENSOR_BOOL8:
+            type = armnn::DataType::Boolean;
+            break;
         case V1_3::OperandType::TENSOR_FLOAT32:
             type = armnn::DataType::Float32;
             break;
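
On the ArmNN side, TENSOR_QUANT8_ASYMM_SIGNED corresponds to the QAsymmS8 data type: an int8 tensor with a per-tensor scale and a signed zero point. The sketch below is not part of this patch and only shows how such an operand is described in ArmNN terms; the function name and quantization parameters are illustrative.

#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

// Illustrative only: describe a signed asymmetric 8-bit quantized tensor.
armnn::TensorInfo MakeSignedQuantizedInfo()
{
    const float   scale     = 0.1f; // arbitrary quantization parameters
    const int32_t zeroPoint = -5;
    return armnn::TensorInfo(armnn::TensorShape({ 1, 2, 2, 1 }),
                             armnn::DataType::QAsymmS8,
                             scale,
                             zeroPoint);
}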