IVGCVSW-4565 TENSOR_BOOL8 data type not supported in AndroidNN Driver

* Added TENSOR_BOOL8 support
* Added Broadcast support to comparison operators (shape-padding idea sketched below)
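
For context, the broadcast step pads the lower-rank input's shape with leading
1-dimensions (via a reshape) before the two inputs are connected to the comparison
layer. Below is a minimal standalone sketch of that shape-padding idea only; the
names (PadToRank, input0Shape, input1Shape) are illustrative and are not the
driver's actual helpers. The real BroadcastTensor in ConversionUtils.hpp also
inserts an armnn ReshapeLayer and wires up the layer slots.

    // Illustrative only: pad the lower-rank shape with leading 1s so both
    // comparison inputs end up with equal rank before they are connected.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    std::vector<unsigned int> PadToRank(const std::vector<unsigned int>& shape, size_t rank)
    {
        std::vector<unsigned int> padded(rank, 1u);
        // Copy the original dimensions into the trailing positions.
        std::copy(shape.begin(), shape.end(), padded.end() - shape.size());
        return padded;
    }

    int main()
    {
        std::vector<unsigned int> input0Shape = {2, 3, 4}; // higher-rank operand
        std::vector<unsigned int> input1Shape = {4};       // operand to broadcast

        const size_t rank = std::max(input0Shape.size(), input1Shape.size());
        const std::vector<unsigned int> reshaped = PadToRank(input1Shape, rank); // {1, 1, 4}

        for (unsigned int d : reshaped) { std::printf("%u ", d); }
        std::printf("\n");
        return 0;
    }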

!armnn:2903

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I844e32b57399eff2dc60af9b2099145316c80cae
diff --git a/1.2/HalPolicy.cpp b/1.2/HalPolicy.cpp
index b3ccc47..1811688 100644
--- a/1.2/HalPolicy.cpp
+++ b/1.2/HalPolicy.cpp
@@ -279,9 +279,11 @@
 
     IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
     assert(layer != nullptr);
-
-    input0.Connect(layer->GetInputSlot(0));
-    input1.Connect(layer->GetInputSlot(1));
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
 
     return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
 }
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index d4ca434..90b1c7d 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -189,6 +189,7 @@
 inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
 {
     return type == V1_2::OperandType::BOOL                           ||
+           type == V1_2::OperandType::TENSOR_BOOL8                   ||
            type == V1_2::OperandType::TENSOR_FLOAT16                 ||
            type == V1_2::OperandType::TENSOR_FLOAT32                 ||
            type == V1_2::OperandType::TENSOR_QUANT8_ASYMM            ||
diff --git a/Utils.cpp b/Utils.cpp
index c95f6e1..c548f84 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -113,6 +113,9 @@
     DataType type;
     switch (operand.type)
     {
+        case V1_2::OperandType::TENSOR_BOOL8:
+            type = armnn::DataType::Boolean;
+            break;
         case V1_2::OperandType::TENSOR_FLOAT32:
             type = armnn::DataType::Float32;
             break;