IVGCVSW-2503 Refactor RefElementwiseWorkload around Equal and Greater

	* Remove Equal and Greater from RefElementwiseWorkload
	* Create RefComparisonWorkload and add Equal and Greater
	* Update ElementwiseFunction for different input/output types (see the sketch below)
	* Update TfParser to create Equal/Greater with Boolean output
	* Update relevant tests to check for Boolean comparison
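
A minimal self-contained sketch of the ElementwiseFunction change
(illustrative only: the name ElementwiseSketch and the flat-vector
interface are assumptions, not the actual Arm NN code). The binary
functor is still applied elementwise, but the output element type is
now a separate template parameter, so Equal/Greater can write Boolean
(uint8) results from float or quantized inputs:

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <vector>

    template <typename Functor, typename InType, typename OutType>
    void ElementwiseSketch(const std::vector<InType>& in0,
                           const std::vector<InType>& in1,
                           std::vector<OutType>& out)
    {
        // Apply the binary functor pairwise and cast each result to the
        // (possibly different) output element type.
        Functor f;
        out.resize(in0.size());
        std::transform(in0.begin(), in0.end(), in1.begin(), out.begin(),
                       [&f](InType a, InType b) { return static_cast<OutType>(f(a, b)); });
    }

    // Usage: Equal over float inputs, Boolean-style uint8 output.
    //   std::vector<float> a{1.f, 2.f}, b{1.f, 3.f};
    //   std::vector<uint8_t> result;
    //   ElementwiseSketch<std::equal_to<float>, float, uint8_t>(a, b, result); // result == {1, 0}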

Change-Id: I299b7f2121769c960ac0c6139764a5f3c89c9c32
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 7784cc6..2d54335 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -38,7 +38,7 @@
 // Makes a workload for one of the specified types based on the data type requirements of the tensorinfo.
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template <typename Float16Workload, typename Float32Workload, typename Uint8Workload, typename Int32Workload,
-    typename QueueDescriptorType, typename... Args>
+          typename BooleanWorkload, typename QueueDescriptorType, typename... Args>
 std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
                                               const WorkloadInfo& info,
                                               Args&&... args)
@@ -47,8 +47,10 @@
         info.m_InputTensorInfos[0].GetDataType()
         : info.m_OutputTensorInfos[0].GetDataType();
 
-    BOOST_ASSERT(info.m_InputTensorInfos.empty() || info.m_OutputTensorInfos.empty()
-        || info.m_InputTensorInfos[0].GetDataType() == info.m_OutputTensorInfos[0].GetDataType());
+    BOOST_ASSERT(info.m_InputTensorInfos.empty()  ||
+                 info.m_OutputTensorInfos.empty() ||
+                 ((info.m_InputTensorInfos[0].GetDataType() == info.m_OutputTensorInfos[0].GetDataType()) ||
+                   info.m_OutputTensorInfos[0].GetDataType() == armnn::DataType::Boolean));
 
     switch (dataType)
     {
@@ -60,6 +62,8 @@
             return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::Signed32:
             return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
+        case DataType::Boolean:
+            return MakeWorkloadForType<BooleanWorkload>::Func(descriptor, info, std::forward<Args>(args)...);
         default:
             BOOST_ASSERT_MSG(false, "Unknown DataType.");
             return nullptr;
@@ -67,16 +71,18 @@
 }
 
 // Makes a workload for one of the specified types based on the data type requirements of the tensorinfo.
-// Calling this method is the equivalent of calling the three typed MakeWorkload method with <FloatWorkload,
-// FloatWorkload, Uint8Workload>.
+// Calling this method is equivalent to calling the five-typed MakeWorkloadHelper above with <FloatWorkload,
+// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>.
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
 std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
                                               const WorkloadInfo& info,
                                               Args&&... args)
 {
-    return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload>(descriptor, info,
-       std::forward<Args>(args)...);
+    return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>(
+        descriptor,
+        info,
+        std::forward<Args>(args)...);
 }
 
 } //namespace
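
For context, a hedged usage sketch of the five-typed helper from a comparison
factory method (the method name CreateEqual, the EqualQueueDescriptor type and
the RefComparison*Workload class names are assumptions for illustration, not
taken from this patch):

    // Hypothetical reference-backend factory method. NullWorkload fills the
    // slots for data types the comparison does not support; the new fifth
    // slot is chosen when the relevant tensor's data type is Boolean.
    std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(
        const EqualQueueDescriptor& descriptor, const WorkloadInfo& info) const
    {
        return MakeWorkloadHelper<NullWorkload,                  // Float16
                                  RefComparisonFloat32Workload,  // Float32
                                  RefComparisonUint8Workload,    // Uint8
                                  NullWorkload,                  // Signed32
                                  NullWorkload>                  // Boolean
                                  (descriptor, info);
    }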