IVGCVSW-5092 Add CL Logical workloads

* Add CL Logical workloads for NOT, AND and OR
  (end-to-end usage sketch below).
* Enable Layer and IsSupported tests on CL.

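For context, a minimal sketch of how the new CL support is exercised from the
ArmNN front end: a one-layer LogicalAnd network is built, optimized for GpuAcc
(which consults the ClLayerSupport::IsLogicalBinarySupported check added here)
and executed (which creates a ClLogicalAndWorkload). The shapes, binding ids,
layer names and data below are illustrative only and are not part of this
patch.

    #include <armnn/ArmNN.hpp>

    #include <utility>
    #include <vector>

    int main()
    {
        using namespace armnn;

        // Build a Boolean network: output = input0 AND input1.
        INetworkPtr net = INetwork::Create();
        TensorInfo info({1, 1, 1, 4}, DataType::Boolean);

        IConnectableLayer* in0 = net->AddInputLayer(0);
        IConnectableLayer* in1 = net->AddInputLayer(1);
        IConnectableLayer* logicalAnd = net->AddLogicalBinaryLayer(
            LogicalBinaryDescriptor(LogicalBinaryOperation::LogicalAnd), "and");
        IConnectableLayer* out = net->AddOutputLayer(0);

        in0->GetOutputSlot(0).Connect(logicalAnd->GetInputSlot(0));
        in1->GetOutputSlot(0).Connect(logicalAnd->GetInputSlot(1));
        logicalAnd->GetOutputSlot(0).Connect(out->GetInputSlot(0));
        in0->GetOutputSlot(0).SetTensorInfo(info);
        in1->GetOutputSlot(0).SetTensorInfo(info);
        logicalAnd->GetOutputSlot(0).SetTensorInfo(info);

        // Optimize for the CL backend and load the network.
        IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
        IOptimizedNetworkPtr optNet =
            Optimize(*net, {Compute::GpuAcc}, runtime->GetDeviceSpec());
        NetworkId netId;
        runtime->LoadNetwork(netId, std::move(optNet));

        // Boolean tensors are backed by uint8_t (0 or 1).
        std::vector<uint8_t> a = {1, 0, 1, 0};
        std::vector<uint8_t> b = {1, 1, 0, 0};
        std::vector<uint8_t> result(4);

        InputTensors inputs =
            {{0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), a.data())},
             {1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), b.data())}};
        OutputTensors outputs =
            {{0, Tensor(runtime->GetOutputTensorInfo(netId, 0), result.data())}};

        runtime->EnqueueWorkload(netId, inputs, outputs);  // result == {1, 0, 0, 0}
        return 0;
    }
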
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I8b7227b2487fdbbb55a4baf6e61f290313947de1
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cce5c9b..65454d4 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -42,6 +42,9 @@
 #include "workloads/ClInstanceNormalizationWorkload.hpp"
 #include "workloads/ClL2NormalizationFloatWorkload.hpp"
 #include "workloads/ClLogSoftmaxWorkload.hpp"
+#include "workloads/ClLogicalAndWorkload.hpp"
+#include "workloads/ClLogicalNotWorkload.hpp"
+#include "workloads/ClLogicalOrWorkload.hpp"
 #include "workloads/ClLstmFloatWorkload.hpp"
 #include "workloads/ClMaximumWorkload.hpp"
 #include "workloads/ClMeanWorkload.hpp"
@@ -460,6 +463,11 @@
                                            reasonIfUnsupported,
                                            input,
                                            output);
+        case UnaryOperation::LogicalNot:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalNotWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input,
+                                           output);
         default:
             return false;
     }
@@ -557,6 +565,33 @@
                                    descriptor);
 }
 
+bool ClLayerSupport::IsLogicalBinarySupported(const TensorInfo& input0,
+                                              const TensorInfo& input1,
+                                              const TensorInfo& output,
+                                              const LogicalBinaryDescriptor& descriptor,
+                                              Optional<std::string&> reasonIfUnsupported) const
+{
+    IgnoreUnused(output);
+
+    switch(descriptor.m_Operation)
+    {
+        case LogicalBinaryOperation::LogicalAnd:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalAndWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input0,
+                                           input1,
+                                           output);
+        case LogicalBinaryOperation::LogicalOr:
+            FORWARD_WORKLOAD_VALIDATE_FUNC(ClLogicalOrWorkloadValidate,
+                                           reasonIfUnsupported,
+                                           input0,
+                                           input1,
+                                           output);
+        default:
+            return false;
+    }
+}
+
 bool ClLayerSupport::IsLogSoftmaxSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/cl/ClLayerSupport.hpp b/src/backends/cl/ClLayerSupport.hpp
index d7e2553..f2df94c 100644
--- a/src/backends/cl/ClLayerSupport.hpp
+++ b/src/backends/cl/ClLayerSupport.hpp
@@ -155,6 +155,12 @@
                                     const L2NormalizationDescriptor& descriptor,
                                     Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsLogicalBinarySupported(const TensorInfo& input0,
+                                  const TensorInfo& input1,
+                                  const TensorInfo& output,
+                                  const LogicalBinaryDescriptor& descriptor,
+                                  Optional<std::string&> reasonIfUnsupported) const override;
+
     bool IsLogSoftmaxSupported(const TensorInfo& input,
                                const TensorInfo& output,
                                const LogSoftmaxDescriptor& descriptor,
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index cb4aa92..65121dc 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -281,25 +281,27 @@
     switch(descriptor.m_Parameters.m_Operation)
     {
         case UnaryOperation::Abs:
-             {
-                 AbsQueueDescriptor absQueueDescriptor;
-                 absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
-                 absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+        {
+            AbsQueueDescriptor absQueueDescriptor;
+            absQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
+            absQueueDescriptor.m_Outputs = descriptor.m_Outputs;
 
-                 return  std::make_unique<ClAbsWorkload>(absQueueDescriptor, info);
-             }
+            return std::make_unique<ClAbsWorkload>(absQueueDescriptor, info);
+        }
         case UnaryOperation::Exp:
             return std::make_unique<ClExpWorkload>(descriptor, info);
         case UnaryOperation::Neg:
             return std::make_unique<ClNegWorkload>(descriptor, info);
         case UnaryOperation::Rsqrt:
-             {
-                 RsqrtQueueDescriptor rsqrtQueueDescriptor;
-                 rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
-                 rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
+        {
+            RsqrtQueueDescriptor rsqrtQueueDescriptor;
+            rsqrtQueueDescriptor.m_Inputs  = descriptor.m_Inputs;
+            rsqrtQueueDescriptor.m_Outputs = descriptor.m_Outputs;
 
-                 return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info);
-             }
+            return std::make_unique<ClRsqrtWorkload>(rsqrtQueueDescriptor, info);
+        }
+        case UnaryOperation::LogicalNot:
+            return std::make_unique<ClLogicalNotWorkload>(descriptor, info);
         default:
             return nullptr;
     }
@@ -370,6 +372,32 @@
     return MakeWorkload<ClL2NormalizationFloatWorkload, NullWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+                                                                  const WorkloadInfo& info) const
+{
+    switch(descriptor.m_Parameters.m_Operation)
+    {
+        case LogicalBinaryOperation::LogicalAnd:
+            return std::make_unique<ClLogicalAndWorkload>(descriptor, info);
+        case LogicalBinaryOperation::LogicalOr:
+            return std::make_unique<ClLogicalOrWorkload>(descriptor, info);
+        default:
+            return nullptr;
+    }
+}
+
+std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                                 const WorkloadInfo& info) const
+{
+    switch(descriptor.m_Parameters.m_Operation)
+    {
+        case UnaryOperation::LogicalNot:
+            return std::make_unique<ClLogicalNotWorkload>(descriptor, info);
+        default:
+            return nullptr;
+    }
+}
+
 std::unique_ptr<IWorkload> ClWorkloadFactory::CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                                const WorkloadInfo& info) const
 {
diff --git a/src/backends/cl/ClWorkloadFactory.hpp b/src/backends/cl/ClWorkloadFactory.hpp
index fad5dd0..fe5d36c 100644
--- a/src/backends/cl/ClWorkloadFactory.hpp
+++ b/src/backends/cl/ClWorkloadFactory.hpp
@@ -138,6 +138,12 @@
     std::unique_ptr<IWorkload> CreateL2Normalization(const L2NormalizationQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateLogicalBinary(const LogicalBinaryQueueDescriptor& descriptor,
+                                                   const WorkloadInfo& info) const override;
+
+    std::unique_ptr<IWorkload> CreateLogicalUnary(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                                  const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateLogSoftmax(const LogSoftmaxQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info) const override;
 
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 9cbe21e..52295cc 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -46,6 +46,9 @@
         workloads/ClGatherWorkload.cpp \
         workloads/ClInstanceNormalizationWorkload.cpp \
         workloads/ClL2NormalizationFloatWorkload.cpp \
+        workloads/ClLogicalAndWorkload.cpp \
+        workloads/ClLogicalNotWorkload.cpp \
+        workloads/ClLogicalOrWorkload.cpp \
         workloads/ClLogSoftmaxWorkload.cpp \
         workloads/ClLstmFloatWorkload.cpp \
         workloads/ClMaximumWorkload.cpp \
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 81d0cc2..2b8b0d4 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -121,6 +121,26 @@
     BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float16");
 }
 
+BOOST_FIXTURE_TEST_CASE(IsLogicalBinarySupportedCl, ClContextControlFixture)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsLogicalBinaryLayerSupportedTests<armnn::ClWorkloadFactory,
+      armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
+
+    BOOST_CHECK(result);
+}
+
+BOOST_FIXTURE_TEST_CASE(IsLogicalBinaryBroadcastSupportedCl, ClContextControlFixture)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::ClWorkloadFactory,
+      armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
+
+    BOOST_CHECK(result);
+}
+
 BOOST_FIXTURE_TEST_CASE(IsMeanSupportedCl, ClContextControlFixture)
 {
     std::string reasonIfUnsupported;
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 1865bdd..7d40a69 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -1226,6 +1226,22 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2dFloat16, Exp2dTest<DataType::Float16>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dFloat16, Exp3dTest<DataType::Float16>)
 
+// Logical
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndInt, LogicalAndIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast1, LogicalAndBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast2, LogicalAndBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast3, LogicalAndBroadcast3Test)
+
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOr, LogicalOrTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrInt, LogicalOrIntTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast1, LogicalOrBroadcast1Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast2, LogicalOrBroadcast2Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast3, LogicalOrBroadcast3Test)
+
 #if defined(ARMNNREF_ENABLED)
 
 // The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index 24c09ad..6118d9b 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -50,6 +50,12 @@
     ClInstanceNormalizationWorkload.hpp
     ClL2NormalizationFloatWorkload.cpp
     ClL2NormalizationFloatWorkload.hpp
+    ClLogicalAndWorkload.cpp
+    ClLogicalAndWorkload.hpp
+    ClLogicalNotWorkload.cpp
+    ClLogicalNotWorkload.hpp
+    ClLogicalOrWorkload.cpp
+    ClLogicalOrWorkload.hpp
     ClLogSoftmaxWorkload.cpp
     ClLogSoftmaxWorkload.hpp
     ClLstmFloatWorkload.cpp
diff --git a/src/backends/cl/workloads/ClLogicalAndWorkload.cpp b/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
new file mode 100644
index 0000000..9418d73
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogicalAndWorkload.cpp
@@ -0,0 +1,53 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClLogicalAndWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo& input0,
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInputInfo1 = BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    const arm_compute::Status aclStatus = arm_compute::CLLogicalAnd::validate(&aclInputInfo0,
+                                                                              &aclInputInfo1,
+                                                                              &aclOutputInfo);
+    return aclStatus;
+}
+
+ClLogicalAndWorkload::ClLogicalAndWorkload(const LogicalBinaryQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info)
+    : BaseWorkload<LogicalBinaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClLogicalAndWorkload", 2, 1);
+
+    arm_compute::ICLTensor& input0 = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& input1 = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_LogicalAndLayer.configure(&input0, &input1, &output);
+}
+
+void ClLogicalAndWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClLogicalAndWorkload_Execute");
+    m_LogicalAndLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClLogicalAndWorkload.hpp b/src/backends/cl/workloads/ClLogicalAndWorkload.hpp
new file mode 100644
index 0000000..3bf6afe
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogicalAndWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLLogicalAnd.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClLogicalAndWorkloadValidate(const TensorInfo& input0,
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output);
+
+class ClLogicalAndWorkload : public BaseWorkload<LogicalBinaryQueueDescriptor>
+{
+public:
+    ClLogicalAndWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLLogicalAnd m_LogicalAndLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClLogicalNotWorkload.cpp b/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
new file mode 100644
index 0000000..eb90caf
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogicalNotWorkload.cpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClLogicalNotWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo& input,
+                                                 const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    const arm_compute::Status aclStatus = arm_compute::CLLogicalNot::validate(&aclInputInfo,
+                                                                              &aclOutputInfo);
+    return aclStatus;
+}
+
+ClLogicalNotWorkload::ClLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info)
+    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClLogicalNotWorkload", 1, 1);
+
+    arm_compute::ICLTensor& input  = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_LogicalNotLayer.configure(&input, &output);
+}
+
+void ClLogicalNotWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClLogicalNotWorkload_Execute");
+    m_LogicalNotLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClLogicalNotWorkload.hpp b/src/backends/cl/workloads/ClLogicalNotWorkload.hpp
new file mode 100644
index 0000000..f1225c7
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogicalNotWorkload.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLLogicalNot.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClLogicalNotWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class ClLogicalNotWorkload : public BaseWorkload<ElementwiseUnaryQueueDescriptor>
+{
+public:
+    ClLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLLogicalNot m_LogicalNotLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClLogicalOrWorkload.cpp b/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
new file mode 100644
index 0000000..e9895bf
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogicalOrWorkload.cpp
@@ -0,0 +1,53 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClLogicalOrWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <armnn/utility/PolymorphicDowncast.hpp>
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+
+#include <cl/ClTensorHandle.hpp>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo0 = BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInputInfo1 = BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+    const arm_compute::Status aclStatus = arm_compute::CLLogicalOr::validate(&aclInputInfo0,
+                                                                             &aclInputInfo1,
+                                                                             &aclOutputInfo);
+    return aclStatus;
+}
+
+ClLogicalOrWorkload::ClLogicalOrWorkload(const LogicalBinaryQueueDescriptor& descriptor,
+                                         const WorkloadInfo& info)
+    : BaseWorkload<LogicalBinaryQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("ClLogicalOrWorkload", 2, 1);
+
+    arm_compute::ICLTensor& input0 = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ICLTensor& input1 = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ICLTensor& output = PolymorphicDowncast<ClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_LogicalOrLayer.configure(&input0, &input1, &output);
+}
+
+void ClLogicalOrWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_CL("ClLogicalOrWorkload_Execute");
+    m_LogicalOrLayer.run();
+}
+
+} // namespace armnn
diff --git a/src/backends/cl/workloads/ClLogicalOrWorkload.hpp b/src/backends/cl/workloads/ClLogicalOrWorkload.hpp
new file mode 100644
index 0000000..8faabde
--- /dev/null
+++ b/src/backends/cl/workloads/ClLogicalOrWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <arm_compute/core/Error.h>
+#include <arm_compute/runtime/CL/functions/CLLogicalOr.h>
+
+namespace armnn
+{
+
+arm_compute::Status ClLogicalOrWorkloadValidate(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output);
+
+class ClLogicalOrWorkload : public BaseWorkload<LogicalBinaryQueueDescriptor>
+{
+public:
+    ClLogicalOrWorkload(const LogicalBinaryQueueDescriptor& descriptor, const WorkloadInfo& info);
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::CLLogicalOr m_LogicalOrLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClWorkloads.hpp b/src/backends/cl/workloads/ClWorkloads.hpp
index b48e5a6..efcccb3 100644
--- a/src/backends/cl/workloads/ClWorkloads.hpp
+++ b/src/backends/cl/workloads/ClWorkloads.hpp
@@ -24,6 +24,9 @@
 #include "ClGatherWorkload.hpp"
 #include "ClInstanceNormalizationWorkload.hpp"
 #include "ClL2NormalizationFloatWorkload.hpp"
+#include "ClLogicalAndWorkload.hpp"
+#include "ClLogicalNotWorkload.hpp"
+#include "ClLogicalOrWorkload.hpp"
 #include "ClLogSoftmaxWorkload.hpp"
 #include "ClLstmFloatWorkload.hpp"
 #include "ClConcatWorkload.hpp"