IVGCVSW-2365 Add Reference Equal Workload Implementation

 * Add reference equal workload
 * Add reference equal workload unit tests

Change-Id: If2848e7dde4248566b99d91726d08143c40ff80d
diff --git a/src/backends/backendsCommon/StringMapping.hpp b/src/backends/backendsCommon/StringMapping.hpp
index aa7fb6d..8541195 100644
--- a/src/backends/backendsCommon/StringMapping.hpp
+++ b/src/backends/backendsCommon/StringMapping.hpp
@@ -18,6 +18,7 @@
 public:
     enum Id {
         RefAdditionWorkload_Execute,
+        RefEqualWorkload_Execute,
         RefSubtractionWorkload_Execute,
         RefMaximumWorkload_Execute,
         RefMultiplicationWorkload_Execute,
@@ -37,6 +38,7 @@
     StringMapping()
     {
         m_Strings[RefAdditionWorkload_Execute] = "RefAdditionWorkload_Execute";
+        m_Strings[RefEqualWorkload_Execute] = "RefEqualWorkload_Execute";
         m_Strings[RefSubtractionWorkload_Execute] = "RefSubtractionWorkload_Execute";
         m_Strings[RefMaximumWorkload_Execute] = "RefMaximumWorkload_Execute";
         m_Strings[RefMultiplicationWorkload_Execute] = "RefMultiplicationWorkload_Execute";
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 47d5364..67cee1c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1013,4 +1013,17 @@
     ValidateSingleOutput(workloadInfo, "DebugQueueDescriptor");
 }
 
+void EqualQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateTwoInputs(workloadInfo, "EqualQueueDescriptor");
+    ValidateSingleOutput(workloadInfo, "EqualQueueDescriptor");
+
+    ValidateBroadcastTensorShapesMatch(workloadInfo.m_InputTensorInfos[0],
+                                       workloadInfo.m_InputTensorInfos[1],
+                                       workloadInfo.m_OutputTensorInfos[0],
+                                       "EqualQueueDescriptor",
+                                       "first input",
+                                       "second input");
+}
+
 } //namespace armnn
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index b44c835..4dc49f9 100755
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -1665,6 +1665,15 @@
     return workloadFactory.CreateMinimum(descriptor, info);
 }
 
+template<>
+std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::EqualQueueDescriptor>(
+        const armnn::IWorkloadFactory& workloadFactory,
+        const armnn::WorkloadInfo& info,
+        const armnn::EqualQueueDescriptor& descriptor)
+{
+    return workloadFactory.CreateEqual(descriptor, info);
+}
+
 namespace {
     template <typename Descriptor, typename dataType>
     LayerTestResult<dataType, 4> ElementwiseTestHelper
@@ -1724,6 +1733,169 @@
     }
 }
 
+LayerTestResult<float, 4> EqualSimpleTest(armnn::IWorkloadFactory& workloadFactory,
+                                          const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int width = 2;
+    const unsigned int height = 2;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 2;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
+                                3, 3, 3, 3,  4, 4, 4, 4 });
+
+    std::vector<float> input1({ 1, 1, 1, 1,  3, 3, 3, 3,
+                                5, 5, 5, 5,  4, 4, 4, 4 });
+
+    std::vector<float> output({ 1, 1, 1, 1,  0, 0, 0, 0,
+                                0, 0, 0, 0,  1, 1, 1, 1 });
+
+    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
+            (workloadFactory,
+             memoryManager,
+             shape,
+             input0,
+             shape,
+             input1,
+             shape,
+             output);
+}
+
+LayerTestResult<float, 4> EqualBroadcast1ElementTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 1 });
+
+    std::vector<float> output({ 1, 0, 0, 0, 0, 0, 0, 0});
+
+    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output);
+}
+
+LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6,
+                                7, 8, 9, 10, 11, 12 });
+
+    std::vector<float> input1({ 1, 2, 3});
+
+    std::vector<float> output({ 1, 1, 1, 0, 0, 0,
+                                0, 0, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, float>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output);
+}
+
+LayerTestResult<uint8_t, 4> EqualUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    unsigned int shape[] = { 2, 2, 2, 2 };
+
+    // With quantization scale 1.0f and offset 0, the quantized values below equal their dequantized values.
+    std::vector<uint8_t> input0({ 1, 1, 1, 1, 6, 6, 6, 6,
+                                  3, 3, 3, 3, 5, 5, 5, 5 });
+
+    std::vector<uint8_t> input1({ 2, 2, 2, 2, 6, 6, 6, 6,
+                                  3, 3, 3, 3, 5, 5, 5, 5 });
+
+    std::vector<uint8_t> output({ 0, 0, 0, 0, 1, 1, 1, 1,
+                                  1, 1, 1, 1, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
+            (workloadFactory,
+             memoryManager,
+             shape,
+             input0,
+             shape,
+             input1,
+             shape,
+             output,
+             1.0f,
+             0);
+}
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+                                  7, 8, 9, 10, 11, 12 });
+
+    std::vector<uint8_t> input1({ 1 });
+
+    std::vector<uint8_t> output({ 1, 0, 0, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output,
+             1.0f,
+             0);
+}
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0({ 1, 2, 3, 4, 5, 6,
+                                  7, 8, 9, 10, 11, 12 });
+
+    std::vector<uint8_t> input1({ 1, 1, 3});
+
+    std::vector<uint8_t> output({ 1, 0, 1, 0, 0, 0,
+                                  0, 0, 0, 0, 0, 0 });
+
+    return ElementwiseTestHelper<armnn::EqualQueueDescriptor, uint8_t>
+            (workloadFactory,
+             memoryManager,
+             shape0,
+             input0,
+             shape1,
+             input1,
+             shape0,
+             output,
+             1.0f,
+             0);
+}
 
 LayerTestResult<float, 4> MaximumSimpleTest(armnn::IWorkloadFactory& workloadFactory,
                                            const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 1f38675..029418e 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -867,6 +867,30 @@
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
 
+LayerTestResult<float, 4> EqualSimpleTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> EqualBroadcast1ElementTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<float, 4> EqualBroadcast1DVectorTest(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1ElementUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+LayerTestResult<uint8_t, 4> EqualBroadcast1DVectorUint8Test(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
 LayerTestResult<float, 2> FullyConnectedLargeTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index eebab35..82b4647 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -252,6 +252,18 @@
                                    output);
 }
 
+bool ClLayerSupport::IsEqualSupported(const TensorInfo& input0,
+                                      const TensorInfo& input1,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input0);
+    ignore_unused(input1);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return false;
+}
+
 bool ClLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                  const FakeQuantizationDescriptor& descriptor,
                                                  Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index 470fa2a..82efd00 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -66,6 +66,11 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsEqualSupported(const TensorInfo& input0,
+                          const TensorInfo& input1,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 36c9f8b..0033b86 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -210,6 +210,18 @@
     return false;
 }
 
+bool NeonLayerSupport::IsEqualSupported(const TensorInfo& input0,
+                                        const TensorInfo& input1,
+                                        const TensorInfo& output,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input0);
+    ignore_unused(input1);
+    ignore_unused(output);
+    ignore_unused(reasonIfUnsupported);
+    return false;
+}
+
 bool NeonLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                    const FakeQuantizationDescriptor& descriptor,
                                                    Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index e5cd3cc..5724ed8 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -61,6 +61,11 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsEqualSupported(const TensorInfo& input0,
+                          const TensorInfo& input1,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 2c8f9cb..2952ae1 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -203,6 +203,19 @@
                                     &TrueFunc<>);
 }
 
+bool RefLayerSupport::IsEqualSupported(const TensorInfo& input0,
+                                       const TensorInfo& input1,
+                                       const TensorInfo& output,
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    ignore_unused(input1);
+    ignore_unused(output);
+    return IsSupportedForDataTypeRef(reasonIfUnsupported,
+                                     input0.GetDataType(),
+                                     &TrueFunc<>,
+                                     &TrueFunc<>);
+}
+
 bool RefLayerSupport::IsFakeQuantizationSupported(const TensorInfo& input,
                                                   const FakeQuantizationDescriptor& descriptor,
                                                   Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 9dc64cb..399f7b5 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -71,6 +71,11 @@
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsEqualSupported(const TensorInfo& input0,
+                          const TensorInfo& input1,
+                          const TensorInfo& output,
+                          Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsFakeQuantizationSupported(const TensorInfo& input,
                                      const FakeQuantizationDescriptor& descriptor,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index 110a947..8173bbb 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -285,7 +285,7 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateEqual(const EqualQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const
 {
-    return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkload<RefEqualFloat32Workload, RefEqualUint8Workload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateBatchToSpaceNd(const BatchToSpaceNdQueueDescriptor& descriptor,
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index d3c2231..eda58a9 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -233,6 +233,14 @@
 ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1Element, DivisionBroadcast1ElementUint8Test)
 ARMNN_AUTO_TEST_CASE(DivisionUint8Broadcast1DVector, DivisionBroadcast1DVectorUint8Test)
 
+// Equal
+ARMNN_AUTO_TEST_CASE(SimpleEqual, EqualSimpleTest)
+ARMNN_AUTO_TEST_CASE(EqualBroadcast1Element, EqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVector, EqualBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_CASE(EqualUint8, EqualUint8Test)
+ARMNN_AUTO_TEST_CASE(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_CASE(EqualBroadcast1DVectorUint8, EqualBroadcast1DVectorUint8Test)
+
 // Max
 ARMNN_AUTO_TEST_CASE(SimpleMaximum, MaximumSimpleTest)
 ARMNN_AUTO_TEST_CASE(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
diff --git a/src/backends/reference/workloads/ElementwiseFunction.cpp b/src/backends/reference/workloads/ElementwiseFunction.cpp
index 88d5190..18ceade 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.cpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.cpp
@@ -15,11 +15,11 @@
 
 template <typename Functor>
 ElementwiseFunction<Functor>::ElementwiseFunction(const TensorShape& inShape0,
-                                                const TensorShape& inShape1,
-                                                const TensorShape& outShape,
-                                                const float* inData0,
-                                                const float* inData1,
-                                                float* outData)
+                                                  const TensorShape& inShape1,
+                                                  const TensorShape& outShape,
+                                                  const float* inData0,
+                                                  const float* inData1,
+                                                  float* outData)
 {
     BroadcastLoop(inShape0, inShape1, outShape).Unroll(Functor(), 0, inData0, inData1, outData);
 }
@@ -31,4 +31,5 @@
 template struct armnn::ElementwiseFunction<std::multiplies<float>>;
 template struct armnn::ElementwiseFunction<std::divides<float>>;
 template struct armnn::ElementwiseFunction<armnn::maximum<float>>;
-template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
\ No newline at end of file
+template struct armnn::ElementwiseFunction<armnn::minimum<float>>;
+template struct armnn::ElementwiseFunction<std::equal_to<float>>;
diff --git a/src/backends/reference/workloads/ElementwiseFunction.hpp b/src/backends/reference/workloads/ElementwiseFunction.hpp
index 5011616..0ac1364 100644
--- a/src/backends/reference/workloads/ElementwiseFunction.hpp
+++ b/src/backends/reference/workloads/ElementwiseFunction.hpp
@@ -14,11 +14,11 @@
 struct ElementwiseFunction
 {
     ElementwiseFunction(const TensorShape& inShape0,
-                       const TensorShape& inShape1,
-                       const TensorShape& outShape,
-                       const float* inData0,
-                       const float* inData1,
-                       float* outData);
+                        const TensorShape& inShape1,
+                        const TensorShape& outShape,
+                        const float* inData0,
+                        const float* inData1,
+                        float* outData);
 };
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.cpp b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
index a18c7c5..d00bfd0 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.cpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.cpp
@@ -73,3 +73,6 @@
 
 template class armnn::BaseFloat32ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
 template class armnn::BaseUint8ElementwiseWorkload<armnn::MinimumQueueDescriptor, armnn::minimum<float>>;
+
+template class armnn::BaseFloat32ElementwiseWorkload<armnn::EqualQueueDescriptor, std::equal_to<float>>;
+template class armnn::BaseUint8ElementwiseWorkload<armnn::EqualQueueDescriptor, std::equal_to<float>>;
diff --git a/src/backends/reference/workloads/RefElementwiseWorkload.hpp b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
index b520593..c2855b0 100644
--- a/src/backends/reference/workloads/RefElementwiseWorkload.hpp
+++ b/src/backends/reference/workloads/RefElementwiseWorkload.hpp
@@ -147,4 +147,16 @@
                           DataType::QuantisedAsymm8,
                           MinimumQueueDescriptor,
                           StringMapping::RefMinimumWorkload_Execute>;
+
+using RefEqualFloat32Workload =
+    RefElementwiseWorkload<std::equal_to<float>,
+                          DataType::Float32,
+                          EqualQueueDescriptor,
+                          StringMapping::RefEqualWorkload_Execute>;
+
+using RefEqualUint8Workload =
+    RefElementwiseWorkload<std::equal_to<float>,
+                          DataType::QuantisedAsymm8,
+                          EqualQueueDescriptor,
+                          StringMapping::RefEqualWorkload_Execute>;
 } // armnn