IVGCVSW-2379 Add Greater Ref workload implementation

 * Added the Greater operation as an element-wise workload
 * Added the unit tests

Change-Id: Ie00ee30e47a5f5e17a728032eeb11a085d06c8f2
diff --git a/src/backends/backendsCommon/StringMapping.hpp b/src/backends/backendsCommon/StringMapping.hpp
index 8541195..073a5a6 100644
--- a/src/backends/backendsCommon/StringMapping.hpp
+++ b/src/backends/backendsCommon/StringMapping.hpp
@@ -19,11 +19,12 @@
     enum Id {
         RefAdditionWorkload_Execute,
         RefEqualWorkload_Execute,
-        RefSubtractionWorkload_Execute,
-        RefMaximumWorkload_Execute,
-        RefMultiplicationWorkload_Execute,
         RefDivisionWorkload_Execute,
+        RefGreaterWorkload_Execute,
+        RefMaximumWorkload_Execute,
         RefMinimumWorkload_Execute,
+        RefMultiplicationWorkload_Execute,
+        RefSubtractionWorkload_Execute,
         MAX_STRING_ID
     };
 
@@ -38,12 +39,13 @@
     StringMapping()
     {
         m_Strings[RefAdditionWorkload_Execute] = "RefAdditionWorkload_Execute";
-        m_Strings[RefEqualWorkload_Execute] = "RefEqualWorkload_Execute";
-        m_Strings[RefSubtractionWorkload_Execute] = "RefSubtractionWorkload_Execute";
-        m_Strings[RefMaximumWorkload_Execute] = "RefMaximumWorkload_Execute";
-        m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
         m_Strings[RefDivisionWorkload_Execute] = "RefDivisionWorkload_Execute";
+        m_Strings[RefEqualWorkload_Execute] = "RefEqualWorkload_Execute";
+        m_Strings[RefGreaterWorkload_Execute] = "RefGreaterWorkload_Execute";
+        m_Strings[RefMaximumWorkload_Execute] = "RefMaximumWorkload_Execute";
         m_Strings[RefMinimumWorkload_Execute] = "RefMinimumWorkload_Execute";
+        m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
+        m_Strings[RefSubtractionWorkload_Execute] = "RefSubtractionWorkload_Execute";
     }
 
     StringMapping(const StringMapping &) = delete;
@@ -52,4 +54,4 @@
     const char * m_Strings[MAX_STRING_ID];
 };
 
-} //namespace armnn
\ No newline at end of file
+} //namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 67cee1c..8847b4e 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1026,4 +1026,17 @@
                                        "second input");
 }
 
+void GreaterQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateTwoInputs(workloadInfo, "GreaterQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "GreaterQueueDescriptor");
+
+    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+                                       workloadInfo.m_InputTensorInfos[1],
+                                       workloadInfo.m_OutputTensorInfos[0],
+                                       "GreaterQueueDescriptor",
+                                       "first input",
+                                       "second input");
+}
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index 4dc49f9..43b0d33 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1674,6 +1674,15 @@
     return workloadFactory.CreateEqual(descriptor, info);
 }
 
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::GreaterQueueDescriptor>(
+        const armnn::IWorkloadFactory& workloadFactory,
+        const armnn::WorkloadInfo& info,
+        const armnn::GreaterQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateGreater(descriptor, info);
+}
+
 namespace {
     template <typename Descriptor, typename dataType>
     LayerTestResult<dataType, 4> ElementwiseTestHelper
@@ -1897,6 +1906,170 @@
              0);
 }
 
+LayerTestResult<float, 4> GreaterSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width = 2;
+    const unsigned int height = 2;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 2;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
+                                3, 3, 3, 3,  4, 4, 4, 4 });
+
+    std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
+                                5, 5, 5, 5,  4, 4, 4, 4 });
+
+    std::vector<float> output({ 0, 0, 0, 0,  1, 1, 1, 1,
+                                0, 0, 0, 0,  0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
+            (workloadFactory,
+             memoryManager,
+             shape,
+             input0,
+             shape,
+             input1,
+             shape,
+             output);
+}
+
+LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 1 });
+
+    std::vector<float> output({ 0, 1, 1, 1, 1, 1, 1, 1});
+
+    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output);
+}
+
+LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<float> input0({ 1, 2.9f, 2.1f, 4, 5, 6,
+                                7, 8, 9, 10, 11, 12 });
+
+    std::vector<float> input1({ 1, 3, 2});
+
+    std::vector<float> output({ 0, 0, 1, 1, 1, 1,
+                                1, 1, 1, 1, 1, 1 });
+
+    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, float>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output);
+}
+
+LayerTestResult<uint8_t, 4> GreaterUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    // Quantization uses scale 1.0f and offset 0, so quantized values equal the real values.
+    std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
+                                  3, 3, 3, 3, 5, 5, 5, 5 });
+
+    std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
+                                  2, 2, 2, 2, 5, 5, 5, 5 });
+
+    std::vector<uint8_t> output({ 0, 0, 0, 0, 0, 0, 0, 0,
+                                  1, 1, 1, 1, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
+            (workloadFactory,
+             memoryManager,
+             shape,
+             input0,
+             shape,
+             input1,
+             shape,
+             output,
+             1.0f,
+             0);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+                                  7, 8, 9, 10, 11, 12 });
+
+    std::vector<uint8_t> input1({ 1 });
+
+    std::vector<uint8_t> output({ 0, 1, 1, 1, 1, 1,
+                                  1, 1, 1, 1, 1, 1 });
+
+    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t >
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output,
+             1.0f,
+             0);
+}
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+                                  7, 8, 9, 10, 11, 12 });
+
+    std::vector<uint8_t> input1({ 1, 1, 3});
+
+    std::vector<uint8_t> output({ 0, 1, 0, 1, 1, 1,
+                                  1, 1, 1, 1, 1, 1 });
+
+    return ElementwiseTestHelper<armnn::GreaterQueueDescriptor, uint8_t>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output,
+             1.0f,
+             0);
+}
+
 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 029418e..146f8c4 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -891,6 +891,30 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
+LayerTestResult<float, 4> GreaterSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> GreaterBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> GreaterBroadcast1DVectorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> GreaterBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 2> FullyConnectedLargeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 82b4647..d02dfb9 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -303,6 +303,18 @@
                                    descriptor);
 }
 
+bool ClLayerSupport::IsGreaterSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input0);
+    ignore_unused(input1);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return false;
+}
+
 bool ClLayerSupport::IsInputSupported(const TensorInfo& input,
                                       Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 82efd00..d040a54 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -86,6 +86,11 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsGreaterSupported(const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 0033b86..869fd03 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -261,6 +261,18 @@
                                    descriptor);
 }
 
+bool NeonLayerSupport::IsGreaterSupported(const TensorInfo& input0,
+                                          const TensorInfo& input1,
+                                          const TensorInfo& output,
+                                          Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input0);
+    ignore_unused(input1);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return false;
+}
+
 bool NeonLayerSupport::IsInputSupported(const TensorInfo& input,
                                         Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index 5724ed8..43d0bd9 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -81,6 +81,11 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsGreaterSupported(const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2952ae1..a64339e 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -257,6 +257,19 @@
                                      &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsGreaterSupported(const TensorInfo& input0,
+                                         const TensorInfo& input1,
+                                         const TensorInfo& output,
+                                         Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input1);
+    ignore_unused(output);
+    return IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                     input0.GetDataType(),
+                                     &TrueFunc<>,
+                                     &TrueFunc<>);
+}
+
 bool RefLayerSupport::IsInputSupported(const TensorInfo& input,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 399f7b5..3941f4b 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -91,6 +91,11 @@
                                    const FullyConnectedDescriptor& descriptor,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsGreaterSupported(const TensorInfo& input0,
+                            const TensorInfo& input1,
+                            const TensorInfo& output,
+                            Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsInputSupported(const TensorInfo& input,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 8173bbb..eb8807e 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -303,7 +303,7 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateGreater(const GreaterQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkload<RefGreaterFloat32Workload, RefGreaterUint8Workload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index eda58a9..6e7da13 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -241,6 +241,14 @@
 ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
 ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorUint8, EqualBroadcast1DVectorUint8Test)
 
+// Greater
+ARMNN_AUTO_TEST_CASE(SimpleGreater, GreaterSimpleTest)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVector, GreaterBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(GreaterUint8, GreaterUint8Test)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(GreaterBroadcast1DVectorUint8, GreaterBroadcast1DVectorUint8Test)
+
 // Max
 ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
 ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index 18ceade..cb8aa70 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -32,4 +32,5 @@
 template struct armnn::ElementwiseFunction<std::divides<float>>;
 template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
 template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
-template struct armnn::ElementwiseFunction<std::equal_to<float>>;
\ No newline at end of file
+template struct armnn::ElementwiseFunction<std::equal_to<float>>;
+template struct armnn::ElementwiseFunction<std::greater<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index d00bfd0..13d6e70 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -45,11 +45,11 @@
     std::vector<float> results(outputInfo.GetNumElements());
 
     ElementwiseFunction<Functor>(inputInfo0.GetShape(),
-                                inputInfo1.GetShape(),
-                                outputInfo.GetShape(),
-                                dequant0.data(),
-                                dequant1.data(),
-                                results.data());
+                                 inputInfo1.GetShape(),
+                                 outputInfo.GetShape(),
+                                 dequant0.data(),
+                                 dequant1.data(),
+                                 results.data());
 
     Quantize(GetOutputTensorDataU8(0, data), results.data(), outputInfo);
 }
@@ -76,3 +76,6 @@
 
 template class armnn::BaseFloat32ElementwiseWorkload<armnn::EqualQueueDescriptor, std::equal_to<float>>;
 template class armnn::BaseUint8ElementwiseWorkload<armnn::EqualQueueDescriptor, std::equal_to<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::GreaterQueueDescriptor, std::greater<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::GreaterQueueDescriptor, std::greater<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index c2855b0..1b3200f 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -12,8 +12,6 @@
 #include "Maximum.hpp"
 #include "Minimum.hpp"
 
-
-
 namespace armnn
 {
 
@@ -86,7 +84,6 @@
                           AdditionQueueDescriptor,
                           StringMapping::RefAdditionWorkload_Execute>;
 
-
 using RefSubtractionFloat32Workload =
     RefElementwiseWorkload<std::minus<float>,
                           DataType::Float32,
@@ -132,9 +129,9 @@
 
 using RefMaximumUint8Workload =
     RefElementwiseWorkload<armnn::maximum<float>,
-                           DataType::QuantisedAsymm8,
-                           MaximumQueueDescriptor,
-                           StringMapping::RefMaximumWorkload_Execute>;
+                          DataType::QuantisedAsymm8,
+                          MaximumQueueDescriptor,
+                          StringMapping::RefMaximumWorkload_Execute>;
 
 using RefMinimumFloat32Workload =
     RefElementwiseWorkload<minimum<float>,
@@ -159,4 +156,16 @@
                           DataType::QuantisedAsymm8,
                           EqualQueueDescriptor,
                           StringMapping::RefEqualWorkload_Execute>;
+
+using RefGreaterFloat32Workload =
+    RefElementwiseWorkload<std::greater<float>,
+                          DataType::Float32,
+                          GreaterQueueDescriptor,
+                          StringMapping::RefGreaterWorkload_Execute>;
+
+using RefGreaterUint8Workload =
+    RefElementwiseWorkload<std::greater<float>,
+                          DataType::QuantisedAsymm8,
+                          GreaterQueueDescriptor,
+                          StringMapping::RefGreaterWorkload_Execute>;
 } // armnn