Add tensor-related data structures for the new API

Adds the following:
 - TensorDescriptor: responsible for holding the information needed to
 represent a tensor (e.g. shape, dimensions, etc.)
 - Tensor: an aggregate object of a descriptor and backing memory
 - TensorPack: a map of tensors that can be passed to operators as
 inputs/outputs
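
A minimal usage sketch of the new tensor object follows (not part of this
patch). It assumes a ClContext and a populated AclTensorDescriptor already
exist, and that deleting through the returned ITensorV2 pointer is valid;
only the interfaces introduced by this change are used:

    #include "src/common/ITensor.h"
    #include "src/gpu/cl/ClContext.h"

    void fill_tensor(arm_compute::gpu::opencl::ClContext &ctx,
                     const AclTensorDescriptor           &desc)
    {
        // Create an OpenCL-backed tensor through the context;
        // allocate=true also allocates the backing CLTensor memory
        arm_compute::ITensorV2 *tensor = ctx.create_tensor(desc, /* allocate = */ true);

        // Map the CL buffer to get a host-visible pointer, write, then unmap
        if(void *host_ptr = tensor->map())
        {
            // ... fill input data through host_ptr ...
            tensor->unmap();
        }

        delete tensor; // caller owns the tensor returned by create_tensor
    }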

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I02734ac6ad85700d91d6e73217b4637adbf5d177
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5260
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/gpu/cl/ClContext.cpp b/src/gpu/cl/ClContext.cpp
index 2bd8b8d..2e04e1d 100644
--- a/src/gpu/cl/ClContext.cpp
+++ b/src/gpu/cl/ClContext.cpp
@@ -23,6 +23,8 @@
  */
 #include "src/gpu/cl/ClContext.h"
 
+#include "src/gpu/cl/ClTensor.h"
+
 namespace arm_compute
 {
 namespace gpu
@@ -33,8 +35,13 @@
 {
 mlgo::MLGOHeuristics populate_mlgo(const char *filename)
 {
+    bool                 status = false;
     mlgo::MLGOHeuristics heuristics;
-    bool                 status = heuristics.reload_from_file(filename);
+
+    if(filename != nullptr)
+    {
+        status = heuristics.reload_from_file(filename);
+    }
     return status ? std::move(heuristics) : mlgo::MLGOHeuristics();
 }
 } // namespace
@@ -69,6 +76,16 @@
     }
     return false;
 }
+
+ITensorV2 *ClContext::create_tensor(const AclTensorDescriptor &desc, bool allocate)
+{
+    ClTensor *tensor = new ClTensor(this, desc);
+    if(tensor != nullptr && allocate)
+    {
+        tensor->allocate();
+    }
+    return tensor;
+}
 } // namespace opencl
 } // namespace gpu
 } // namespace arm_compute
diff --git a/src/gpu/cl/ClContext.h b/src/gpu/cl/ClContext.h
index e3f16b1..dd6699a 100644
--- a/src/gpu/cl/ClContext.h
+++ b/src/gpu/cl/ClContext.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef SRC_GPU_CL_CPUCONTEXT_H
-#define SRC_GPU_CL_CPUCONTEXT_H
+#ifndef SRC_GPU_CLCONTEXT_H
+#define SRC_GPU_CLCONTEXT_H
 
 #include "src/common/IContext.h"
 #include "src/runtime/CL/mlgo/MLGOHeuristics.h"
@@ -65,6 +65,9 @@
      */
     bool set_cl_ctx(::cl::Context ctx);
 
+    // Inherited methods overridden
+    ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) override;
+
 private:
     mlgo::MLGOHeuristics _mlgo_heuristics;
     ::cl::Context        _cl_context;
@@ -73,4 +76,4 @@
 } // namespace gpu
 } // namespace arm_compute
 
-#endif /* SRC_GPU_CL_CPUCONTEXT_H */
\ No newline at end of file
+#endif /* SRC_GPU_CLCONTEXT_H */
\ No newline at end of file
diff --git a/src/gpu/cl/ClTensor.cpp b/src/gpu/cl/ClTensor.cpp
new file mode 100644
index 0000000..db2081c
--- /dev/null
+++ b/src/gpu/cl/ClTensor.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/ClTensor.h"
+
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace gpu
+{
+namespace opencl
+{
+ClTensor::ClTensor(IContext *ctx, const AclTensorDescriptor &desc)
+    : ITensorV2(ctx), _legacy_tensor()
+{
+    ARM_COMPUTE_ASSERT((ctx != nullptr) && (ctx->type() == Target::GpuOcl));
+    _legacy_tensor = std::make_unique<CLTensor>();
+    _legacy_tensor->allocator()->init(arm_compute::detail::convert_to_legacy_tensor_info(desc));
+}
+
+void *ClTensor::map()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    if(_legacy_tensor == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[ClTensor:map]: Backing tensor does not exist!");
+        return nullptr;
+    }
+
+    _legacy_tensor->map();
+    return _legacy_tensor->buffer();
+}
+
+StatusCode ClTensor::unmap()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    if(_legacy_tensor == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[ClTensor:unmap]: Backing tensor does not exist!");
+        return StatusCode::RuntimeError;
+    }
+    _legacy_tensor->unmap();
+
+    return StatusCode::Success;
+}
+
+StatusCode ClTensor::allocate()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    _legacy_tensor->allocator()->allocate();
+    return StatusCode::Success;
+}
+
+StatusCode ClTensor::import(void *handle, ImportMemoryType type)
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+    ARM_COMPUTE_UNUSED(type, handle);
+
+    return StatusCode::Success;
+}
+
+arm_compute::ITensor *ClTensor::tensor()
+{
+    return _legacy_tensor.get();
+}
+} // namespace opencl
+} // namespace gpu
+} // namespace arm_compute
diff --git a/src/gpu/cl/ClTensor.h b/src/gpu/cl/ClTensor.h
new file mode 100644
index 0000000..4188f62
--- /dev/null
+++ b/src/gpu/cl/ClTensor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_GPU_CLTENSOR_H
+#define SRC_GPU_CLTENSOR_H
+
+#include "src/common/ITensor.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+namespace arm_compute
+{
+namespace gpu
+{
+namespace opencl
+{
+/** OpenCL tensor implementation class */
+class ClTensor final : public ITensorV2
+{
+public:
+    /**  Construct a new OpenCL Tensor object
+     *
+     * @param[in] ctx  Context to be used
+     * @param[in] desc Tensor descriptor
+     */
+    ClTensor(IContext *ctx, const AclTensorDescriptor &desc);
+    /** Allocates tensor
+     *
+     * @return StatusCode A status code
+     */
+    StatusCode allocate();
+
+    // Inherited functions overridden
+    void                 *map() override;
+    StatusCode            unmap() override;
+    arm_compute::ITensor *tensor() override;
+    StatusCode import(void *handle, ImportMemoryType type) override;
+
+private:
+    std::unique_ptr<CLTensor> _legacy_tensor;
+};
+} // namespace opencl
+} // namespace gpu
+} // namespace arm_compute
+
+#endif /* SRC_GPU_CLTENSOR_H */
\ No newline at end of file