Add Queue support

Queues are responsible for scheduling operators and performing other
runtime-related activities, such as tuning.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I0366d9048470d277b8cbf59fa42f95c0ae57c5c9
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5487
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/c/cl/AclOpenClExt.cpp b/src/c/cl/AclOpenClExt.cpp
index ce6d296..e72babc 100644
--- a/src/c/cl/AclOpenClExt.cpp
+++ b/src/c/cl/AclOpenClExt.cpp
@@ -26,6 +26,7 @@
 #include "src/common/ITensorV2.h"
 #include "src/common/Types.h"
 #include "src/gpu/cl/ClContext.h"
+#include "src/gpu/cl/ClQueue.h"
 
 #include "arm_compute/core/CL/ICLTensor.h"
 
@@ -85,6 +86,80 @@
     return AclStatus::AclSuccess;
 }
 
+extern "C" AclStatus AclGetClDevice(AclContext external_ctx, cl_device_id *opencl_device)
+{
+    using namespace arm_compute;
+    IContext *ctx = get_internal(external_ctx);
+
+    if(detail::validate_internal_context(ctx) != StatusCode::Success)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    if(ctx->type() != Target::GpuOcl)
+    {
+        return AclStatus::AclInvalidTarget;
+    }
+
+    if(opencl_device == nullptr)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    *opencl_device = utils::cast::polymorphic_downcast<arm_compute::gpu::opencl::ClContext *>(ctx)->cl_dev().get();
+
+    return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclGetClQueue(AclQueue external_queue, cl_command_queue *opencl_queue)
+{
+    using namespace arm_compute;
+    IQueue *queue = get_internal(external_queue);
+
+    if(detail::validate_internal_queue(queue) != StatusCode::Success)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    if(queue->header.ctx->type() != Target::GpuOcl)
+    {
+        return AclStatus::AclInvalidTarget;
+    }
+
+    if(opencl_queue == nullptr)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    *opencl_queue = utils::cast::polymorphic_downcast<arm_compute::gpu::opencl::ClQueue *>(queue)->cl_queue().get();
+
+    return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclSetClQueue(AclQueue external_queue, cl_command_queue opencl_queue)
+{
+    using namespace arm_compute;
+    IQueue *queue = get_internal(external_queue);
+
+    if(detail::validate_internal_queue(queue) != StatusCode::Success)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    if(queue->header.ctx->type() != Target::GpuOcl)
+    {
+        return AclStatus::AclInvalidTarget;
+    }
+
+    auto cl_queue = utils::cast::polymorphic_downcast<arm_compute::gpu::opencl::ClQueue *>(queue);
+    if(!cl_queue->set_cl_queue(::cl::CommandQueue(opencl_queue)))
+    {
+        return AclStatus::AclRuntimeError;
+    }
+
+    return AclStatus::AclSuccess;
+}
+
 extern "C" AclStatus AclGetClMem(AclTensor external_tensor, cl_mem *opencl_mem)
 {
     using namespace arm_compute;