Add tensor related data structures for the new API

Adds the following:
 - TensorDescriptor: which is responsible for holding the information
 needed to represent a tensor (e.g. shape, dimensions, etc)
 - Tensor: an aggregate object of a descriptor and a backing memory
 - TensorPack: A map of tensors that can be passed to operators as
 inputs/outputs

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I02734ac6ad85700d91d6e73217b4637adbf5d177
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5260
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/c/AclContext.cpp b/src/c/AclContext.cpp
index e88995b..bff70f3 100644
--- a/src/c/AclContext.cpp
+++ b/src/c/AclContext.cpp
@@ -79,20 +79,20 @@
 {
     if(!is_target_valid(target))
     {
-        ARM_COMPUTE_LOG_ERROR_ACL("Target is invalid");
+        ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Target is invalid!");
         return AclUnsupportedTarget;
     }
 
     if(options != nullptr && !are_context_options_valid(options))
     {
-        ARM_COMPUTE_LOG_ERROR_ACL("Context options are invalid");
+        ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Context options are invalid!");
         return AclInvalidArgument;
     }
 
     auto acl_ctx = create_context(target, options);
     if(ctx == nullptr)
     {
-        ARM_COMPUTE_LOG_ERROR_ACL("Couldn't allocate internal resources for context creation");
+        ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Couldn't allocate internal resources for context creation!");
         return AclOutOfMemory;
     }
     *ctx = acl_ctx;
@@ -106,13 +106,12 @@
 
     IContext *ctx = get_internal(external_ctx);
 
-    StatusCode status = StatusCode::Success;
-    status            = detail::validate_internal_context(ctx);
+    StatusCode status = detail::validate_internal_context(ctx);
     ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
 
     if(ctx->refcount() != 0)
     {
-        ARM_COMPUTE_LOG_ERROR_ACL("Context has references on it that haven't been released");
+        ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Context has references on it that haven't been released!");
         // TODO: Fix the refcount with callback when reaches 0
     }
 
diff --git a/src/c/AclTensor.cpp b/src/c/AclTensor.cpp
new file mode 100644
index 0000000..58b17ff
--- /dev/null
+++ b/src/c/AclTensor.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/AclEntrypoints.h"
+#include "src/common/ITensor.h"
+#include "src/common/utils/Macros.h"
+
+namespace
+{
+/**< Maximum allowed dimensions by Compute Library */
+constexpr int32_t max_allowed_dims = 6;
+
+/** Check if a descriptor is valid
+ *
+ * @param[in] desc Descriptor to validate
+ *
+ * @return true in case of success else false
+ */
+bool is_desc_valid(const AclTensorDescriptor &desc)
+{
+    if(desc.data_type > AclFloat32)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Unknown data type!");
+        return false;
+    }
+    if(desc.ndims > max_allowed_dims)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Dimensions surpass the maximum allowed value!");
+        return false;
+    }
+    if(desc.ndims > 0 && desc.shape == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Dimensions values are empty while dimensionality is > 0!");
+        return false;
+    }
+    return true;
+}
+} // namespace
+
+extern "C" AclStatus AclCreateTensor(AclTensor                 *external_tensor,
+                                     AclContext                 external_ctx,
+                                     const AclTensorDescriptor *desc,
+                                     bool                       allocate)
+{
+    using namespace arm_compute;
+
+    IContext *ctx = get_internal(external_ctx);
+
+    StatusCode status = detail::validate_internal_context(ctx);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    if(desc == nullptr || !is_desc_valid(*desc))
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Descriptor is invalid!");
+        return AclInvalidArgument;
+    }
+
+    auto tensor = ctx->create_tensor(*desc, allocate);
+    if(tensor == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[AclCreateTensor]: Couldn't allocate internal resources for tensor creation!");
+        return AclOutOfMemory;
+    }
+    *external_tensor = tensor;
+
+    return AclSuccess;
+}
+
+extern "C" AclStatus AclMapTensor(AclTensor external_tensor, void **handle)
+{
+    using namespace arm_compute;
+
+    auto       tensor = get_internal(external_tensor);
+    StatusCode status = detail::validate_internal_tensor(tensor);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    if(handle == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[AclMapTensor]: Handle object is nullptr!");
+        return AclInvalidArgument;
+    }
+
+    *handle = tensor->map();
+
+    return AclSuccess;
+}
+
+extern "C" AclStatus AclUnmapTensor(AclTensor external_tensor, void *handle)
+{
+    ARM_COMPUTE_UNUSED(handle);
+
+    using namespace arm_compute;
+
+    auto       tensor = get_internal(external_tensor);
+    StatusCode status = detail::validate_internal_tensor(tensor);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    status = tensor->unmap();
+    return AclSuccess;
+}
+
+extern "C" AclStatus AclTensorImport(AclTensor external_tensor, void *handle, AclImportMemoryType type)
+{
+    using namespace arm_compute;
+
+    auto       tensor = get_internal(external_tensor);
+    StatusCode status = detail::validate_internal_tensor(tensor);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    status = tensor->import(handle, utils::as_enum<ImportMemoryType>(type));
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    return AclSuccess;
+}
+
+extern "C" AclStatus AclDestroyTensor(AclTensor external_tensor)
+{
+    using namespace arm_compute;
+
+    auto tensor = get_internal(external_tensor);
+
+    StatusCode status = detail::validate_internal_tensor(tensor);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    delete tensor;
+
+    return AclSuccess;
+}
diff --git a/src/c/AclTensorPack.cpp b/src/c/AclTensorPack.cpp
new file mode 100644
index 0000000..6700ef4
--- /dev/null
+++ b/src/c/AclTensorPack.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/AclEntrypoints.h"
+#include "src/common/ITensor.h"
+#include "src/common/TensorPack.h"
+#include "src/common/utils/Macros.h"
+
+namespace
+{
+using namespace arm_compute;
+StatusCode PackTensorInternal(TensorPack &pack, AclTensor external_tensor, int32_t slot_id)
+{
+    auto status = StatusCode::Success;
+    auto tensor = get_internal(external_tensor);
+
+    status = detail::validate_internal_tensor(tensor);
+
+    if(status != StatusCode::Success)
+    {
+        return status;
+    }
+
+    pack.add_tensor(tensor, slot_id);
+
+    return status;
+}
+} // namespace
+
+extern "C" AclStatus AclCreateTensorPack(AclTensorPack *external_pack, AclContext external_ctx)
+{
+    using namespace arm_compute;
+
+    IContext *ctx = get_internal(external_ctx);
+
+    const StatusCode status = detail::validate_internal_context(ctx);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    auto pack = new TensorPack(ctx);
+    if(pack == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL("Couldn't allocate internal resources!");
+        return AclOutOfMemory;
+    }
+    *external_pack = pack;
+
+    return AclSuccess;
+}
+
+extern "C" AclStatus AclPackTensor(AclTensorPack external_pack, AclTensor external_tensor, int32_t slot_id)
+{
+    using namespace arm_compute;
+
+    auto pack = get_internal(external_pack);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(detail::validate_internal_pack(pack));
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(PackTensorInternal(*pack, external_tensor, slot_id));
+    return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclPackTensors(AclTensorPack external_pack, AclTensor *external_tensors, int32_t *slot_ids, size_t num_tensors)
+{
+    using namespace arm_compute;
+
+    auto pack = get_internal(external_pack);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(detail::validate_internal_pack(pack));
+
+    for(unsigned i = 0; i < num_tensors; ++i)
+    {
+        ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(PackTensorInternal(*pack, external_tensors[i], slot_ids[i]));
+    }
+    return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclDestroyTensorPack(AclTensorPack external_pack)
+{
+    using namespace arm_compute;
+
+    auto       pack   = get_internal(external_pack);
+    StatusCode status = detail::validate_internal_pack(pack);
+    ARM_COMPUTE_RETURN_CENUM_ON_FAILURE(status);
+
+    delete pack;
+
+    return AclSuccess;
+}
diff --git a/src/c/cl/AclOpenClExt.cpp b/src/c/cl/AclOpenClExt.cpp
index 5f2bb47..a144f97 100644
--- a/src/c/cl/AclOpenClExt.cpp
+++ b/src/c/cl/AclOpenClExt.cpp
@@ -23,9 +23,12 @@
  */
 #include "arm_compute/AclOpenClExt.h"
 
+#include "src/common/ITensor.h"
 #include "src/common/Types.h"
 #include "src/gpu/cl/ClContext.h"
 
+#include "arm_compute/core/CL/ICLTensor.h"
+
 #include "support/Cast.h"
 
 extern "C" AclStatus AclGetClContext(AclContext external_ctx, cl_context *opencl_context)
@@ -80,4 +83,30 @@
     }
 
     return AclStatus::AclSuccess;
+}
+
+extern "C" AclStatus AclGetClMem(AclTensor external_tensor, cl_mem *opencl_mem)
+{
+    using namespace arm_compute;
+    ITensorV2 *tensor = get_internal(external_tensor);
+
+    if(detail::validate_internal_tensor(tensor) != StatusCode::Success)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    if(tensor->header.ctx->type() != Target::GpuOcl)
+    {
+        return AclStatus::AclInvalidTarget;
+    }
+
+    if(opencl_mem == nullptr)
+    {
+        return AclStatus::AclInvalidArgument;
+    }
+
+    auto cl_tensor = utils::cast::polymorphic_downcast<arm_compute::ICLTensor *>(tensor->tensor());
+    *opencl_mem    = cl_tensor->cl_buffer().get();
+
+    return AclStatus::AclSuccess;
 }
\ No newline at end of file
diff --git a/src/common/IContext.h b/src/common/IContext.h
index 0d23abd..ee23479 100644
--- a/src/common/IContext.h
+++ b/src/common/IContext.h
@@ -41,6 +41,9 @@
 
 namespace arm_compute
 {
+// Forward declarations
+class ITensorV2;
+
 /**< Context interface */
 class IContext : public AclContext_
 {
@@ -88,6 +91,14 @@
     {
         return header.type == detail::ObjectType::Context;
     }
+    /** Create a tensor object
+     *
+     * @param[in] desc     Descriptor to use
+     * @param[in] allocate Flag to allocate tensor
+     *
+     * @return A pointer to the created tensor object
+     */
+    virtual ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) = 0;
 
 private:
     Target                   _target;   /**< Target type of context */
diff --git a/src/common/ITensor.h b/src/common/ITensor.h
new file mode 100644
index 0000000..ee7eac7
--- /dev/null
+++ b/src/common/ITensor.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_COMMON_ITENSOR_H_
+#define SRC_COMMON_ITENSOR_H_
+
+#include "src/common/IContext.h"
+#include "src/common/utils/Validate.h"
+
+struct AclTensor_
+{
+    arm_compute::detail::Header header{ arm_compute::detail::ObjectType::Tensor, nullptr };
+
+protected:
+    AclTensor_()  = default;
+    ~AclTensor_() = default;
+};
+
+namespace arm_compute
+{
+// Forward declaration
+class ITensor;
+
+/** Base class specifying the tensor interface */
+class ITensorV2 : public AclTensor_
+{
+public:
+    /** Explicit constructor
+     *
+     * @param[in] ctx Context to be used by the tensor
+     */
+    explicit ITensorV2(IContext *ctx)
+        : AclTensor_()
+    {
+        ARM_COMPUTE_ASSERT_NOT_NULLPTR(ctx);
+        this->header.ctx = ctx;
+        this->header.ctx->inc_ref();
+    }
+    /** Destructor */
+    virtual ~ITensorV2()
+    {
+        this->header.ctx->dec_ref();
+        this->header.type = detail::ObjectType::Invalid;
+    };
+    /** Checks if the tensor object is valid
+     *
+     * @return True if valid otherwise false
+     */
+    bool is_valid() const
+    {
+        return this->header.type == detail::ObjectType::Tensor;
+    };
+    /** Map tensor to a host pointer
+     *
+     * @return A pointer to the underlying backing memory if successful else nullptr
+     */
+    virtual void *map() = 0;
+    /** Unmap tensor
+     *
+     * @return A status code
+     */
+    virtual StatusCode unmap() = 0;
+    /** Import external memory handle
+     *
+     * @param[in] handle Memory to import
+     * @param[in] type   Type of imported memory
+     *
+     * @return Status code
+     */
+    virtual StatusCode import(void *handle, ImportMemoryType type) = 0;
+    /** Get the legacy tensor object
+     *
+     * @return The legacy underlying tensor object
+     */
+    virtual arm_compute::ITensor *tensor() = 0;
+};
+
+/** Extract internal representation of a Tensor
+ *
+ * @param[in] tensor Opaque tensor pointer
+ *
+ * @return The internal representation as an ITensorV2
+ */
+inline ITensorV2 *get_internal(AclTensor tensor)
+{
+    return static_cast<ITensorV2 *>(tensor);
+}
+
+namespace detail
+{
+/** Check if an internal tensor is valid
+ *
+ * @param[in] tensor Internal tensor to check
+ *
+ * @return A status code
+ */
+inline StatusCode validate_internal_tensor(const ITensorV2 *tensor)
+{
+    if(tensor == nullptr || !tensor->is_valid())
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[ITensorV2]: Invalid tensor object");
+        return StatusCode::InvalidArgument;
+    }
+    return StatusCode::Success;
+}
+} // namespace detail
+} // namespace arm_compute
+#endif /* SRC_COMMON_ITENSOR_H_ */
diff --git a/src/common/TensorPack.cpp b/src/common/TensorPack.cpp
new file mode 100644
index 0000000..c582c7b
--- /dev/null
+++ b/src/common/TensorPack.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/common/TensorPack.h"
+#include "src/common/ITensor.h"
+#include "src/common/utils/Validate.h"
+
+namespace arm_compute
+{
+TensorPack::TensorPack(IContext *ctx)
+    : AclTensorPack_(), _pack()
+{
+    ARM_COMPUTE_ASSERT_NOT_NULLPTR(ctx);
+    this->header.ctx = ctx;
+    this->header.ctx->inc_ref();
+}
+
+TensorPack::~TensorPack()
+{
+    this->header.ctx->dec_ref();
+    this->header.type = detail::ObjectType::Invalid;
+}
+
+AclStatus TensorPack::add_tensor(ITensorV2 *tensor, int32_t slot_id)
+{
+    _pack.add_tensor(slot_id, tensor->tensor());
+    return AclStatus::AclSuccess;
+}
+
+size_t TensorPack::size() const
+{
+    return _pack.size();
+}
+
+bool TensorPack::empty() const
+{
+    return _pack.empty();
+}
+
+bool TensorPack::is_valid() const
+{
+    return this->header.type == detail::ObjectType::TensorPack;
+}
+
+arm_compute::ITensor *TensorPack::get_tensor(int32_t slot_id)
+{
+    return _pack.get_tensor(slot_id);
+}
+
+arm_compute::ITensorPack &TensorPack::get_tensor_pack()
+{
+    return _pack;
+}
+} // namespace arm_compute
diff --git a/src/common/TensorPack.h b/src/common/TensorPack.h
new file mode 100644
index 0000000..f330eee
--- /dev/null
+++ b/src/common/TensorPack.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_COMMON_ITENSORPACK_H_
+#define SRC_COMMON_ITENSORPACK_H_
+
+#include "arm_compute/core/ITensorPack.h"
+#include "src/common/IContext.h"
+
+struct AclTensorPack_
+{
+    arm_compute::detail::Header header{ arm_compute::detail::ObjectType::TensorPack, nullptr };
+
+protected:
+    AclTensorPack_()  = default;
+    ~AclTensorPack_() = default;
+};
+
+namespace arm_compute
+{
+// Forward declaration
+class ITensor;
+class ITensorV2;
+
+/** Tensor packing service
+ *
+ * Class is responsible for creating and managing a collection of tensors.
+ * Tensor packs can be passed to operators to be part of the mutable data of the execution.
+ */
+class TensorPack : public AclTensorPack_
+{
+public:
+    /** Constructor
+     *
+     * @param[in] ctx Context to be used
+     */
+    explicit TensorPack(IContext *ctx);
+    /** Destructor */
+    ~TensorPack();
+    /** Add tensor to the pack
+     *
+     * @param[in] tensor  Tensor to add
+     * @param[in] slot_id Slot identification in respect to the operator of the tensor to add
+     *
+     * @return Status code
+     */
+    AclStatus add_tensor(ITensorV2 *tensor, int32_t slot_id);
+    /** Pack size accessor
+     *
+     * @return Number of tensors registered to the pack
+     */
+    size_t size() const;
+    /** Checks if pack is empty
+     *
+     * @return True if empty else false
+     */
+    bool empty() const;
+    /** Checks if an object is valid
+     *
+     * @return True if valid else false
+     */
+    bool is_valid() const;
+    /** Get tensor of a given id from the pack
+     *
+     * @param[in] slot_id Slot identification of tensor to extract
+     *
+     * @return The pointer to the tensor if it exists and is non-const, else nullptr
+     */
+    arm_compute::ITensor *get_tensor(int32_t slot_id);
+    /** Get legacy tensor pack
+     *
+     * @return Legacy tensor pack
+     */
+    arm_compute::ITensorPack &get_tensor_pack();
+
+private:
+    arm_compute::ITensorPack _pack; /**< Pack that currently redirects to the existing TensorPack */
+};
+
+/** Extract internal representation of a TensorPack
+ *
+ * @param[in] pack Opaque tensor pack pointer
+ *
+ * @return The internal representation as a TensorPack
+ */
+inline TensorPack *get_internal(AclTensorPack pack)
+{
+    return static_cast<TensorPack *>(pack);
+}
+
+namespace detail
+{
+/** Check if an internal TensorPack is valid
+ *
+ * @param[in] pack Internal tensor pack to check
+ *
+ * @return A status code
+ */
+inline StatusCode validate_internal_pack(const TensorPack *pack)
+{
+    if(pack == nullptr || !pack->is_valid())
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[TensorPack]: Invalid tensor pack object");
+        return StatusCode::InvalidArgument;
+    }
+    return StatusCode::Success;
+}
+} // namespace detail
+} // namespace arm_compute
+#endif /* SRC_COMMON_ITENSORPACK_H_ */
diff --git a/src/common/Types.h b/src/common/Types.h
index 60a11b0..ba07b51 100644
--- a/src/common/Types.h
+++ b/src/common/Types.h
@@ -52,5 +52,10 @@
     FastRerun = AclPreferFastRerun,
     FastStart = AclPreferFastStart,
 };
+
+enum class ImportMemoryType
+{
+    HostPtr = AclImportMemoryType::AclHostPtr
+};
 } // namespace arm_compute
 #endif /* SRC_COMMON_TYPES_H_ */
diff --git a/src/common/utils/LegacySupport.cpp b/src/common/utils/LegacySupport.cpp
new file mode 100644
index 0000000..5981c65
--- /dev/null
+++ b/src/common/utils/LegacySupport.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace detail
+{
+namespace
+{
+DataType data_type_mapper(AclDataType data_type)
+{
+    switch(data_type)
+    {
+        case AclDataType::AclFloat32:
+            return DataType::F32;
+        case AclDataType::AclFloat16:
+            return DataType::F16;
+        case AclDataType::AclBFloat16:
+            return DataType::BFLOAT16;
+        default:
+            return DataType::UNKNOWN;
+            ;
+    }
+}
+
+TensorShape tensor_shape_mapper(int32_t ndims, int32_t *shape)
+{
+    TensorShape legacy_shape{};
+    for(int32_t d = 0; d < ndims; ++d)
+    {
+        legacy_shape.set(d, shape[d], false);
+    }
+    return legacy_shape;
+}
+} // namespace
+
+TensorInfo convert_to_legacy_tensor_info(const AclTensorDescriptor &desc)
+{
+    TensorInfo legacy_desc;
+    legacy_desc.init(tensor_shape_mapper(desc.ndims, desc.shape), 1, data_type_mapper(desc.data_type));
+    return legacy_desc;
+}
+} // namespace detail
+} // namespace arm_compute
diff --git a/src/common/utils/LegacySupport.h b/src/common/utils/LegacySupport.h
new file mode 100644
index 0000000..37329b7
--- /dev/null
+++ b/src/common/utils/LegacySupport.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_COMMON_LEGACY_SUPPORT_H
+#define SRC_COMMON_LEGACY_SUPPORT_H
+
+#include "arm_compute/Acl.h"
+#include "arm_compute/core/TensorInfo.h"
+
+namespace arm_compute
+{
+namespace detail
+{
+/** Convert a descriptor to a legacy format one
+ *
+ * @param[in] desc Descriptor to convert
+ *
+ * @return Legacy tensor meta-data
+ */
+TensorInfo convert_to_legacy_tensor_info(const AclTensorDescriptor &desc);
+} // namespace detail
+} // namespace arm_compute
+
+#endif /* SRC_COMMON_LEGACY_SUPPORT_H */
diff --git a/src/common/utils/Log.h b/src/common/utils/Log.h
index 0d6a50d..496ee74 100644
--- a/src/common/utils/Log.h
+++ b/src/common/utils/Log.h
@@ -77,4 +77,15 @@
         ARM_COMPUTE_LOG_MSG("ComputeLibrary", arm_compute::logging::LogLevel::ERROR, msg); \
     } while(false)
 
+/** Log an error message to the logger with function name before the message
+ *
+ * @param[in] msg Message to log
+ */
+#define ARM_COMPUTE_LOG_ERROR_WITH_FUNCNAME_ACL(msg)                                                     \
+    do                                                                                                   \
+    {                                                                                                    \
+        ARM_COMPUTE_CREATE_ACL_LOGGER();                                                                 \
+        ARM_COMPUTE_LOG_MSG_WITH_FUNCNAME("ComputeLibrary", arm_compute::logging::LogLevel::ERROR, msg); \
+    } while(false)
+
 #endif /* SRC_COMMON_LOG_H */
diff --git a/src/common/utils/Utils.h b/src/common/utils/Utils.h
index 9602c32..87be9df 100644
--- a/src/common/utils/Utils.h
+++ b/src/common/utils/Utils.h
@@ -44,6 +44,22 @@
 {
     return static_cast<E>(static_cast<std::underlying_type_t<SE>>(v));
 }
+
+/** Convert plain old enumeration to a strongly typed enum
+ *
+ * @tparam SE Strongly typed resulting enum
+ * @tparam E  Plain old C enum
+ *
+ * @param[in] val Value to convert
+ *
+ * @return A corresponding strongly typed enumeration
+ */
+template <typename SE, typename E>
+constexpr SE as_enum(E val) noexcept
+{
+    return static_cast<SE>(val);
+}
+
 /** Check if the given value is in the given enum value list
  *
  * @tparam E  The type of the enum
diff --git a/src/cpu/CpuContext.cpp b/src/cpu/CpuContext.cpp
index 6ff3560..d62c1b6 100644
--- a/src/cpu/CpuContext.cpp
+++ b/src/cpu/CpuContext.cpp
@@ -24,6 +24,7 @@
 #include "src/cpu/CpuContext.h"
 
 #include "arm_compute/core/CPP/CPPTypes.h"
+#include "src/cpu/CpuTensor.h"
 #include "src/runtime/CPUUtils.h"
 
 #include <cstdlib>
@@ -185,5 +186,15 @@
 {
     return _allocator;
 }
+
+ITensorV2 *CpuContext::create_tensor(const AclTensorDescriptor &desc, bool allocate)
+{
+    CpuTensor *tensor = new CpuTensor(this, desc);
+    if(tensor != nullptr && allocate)
+    {
+        tensor->allocate();
+    }
+    return tensor;
+}
 } // namespace cpu
 } // namespace arm_compute
diff --git a/src/cpu/CpuContext.h b/src/cpu/CpuContext.h
index 81bab97..d2062e4 100644
--- a/src/cpu/CpuContext.h
+++ b/src/cpu/CpuContext.h
@@ -67,6 +67,9 @@
      */
     AllocatorWrapper &allocator();
 
+    // Inherited methods overridden
+    ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) override;
+
 private:
     AllocatorWrapper _allocator;
     CpuCapabilities  _caps;
diff --git a/src/cpu/CpuTensor.cpp b/src/cpu/CpuTensor.cpp
new file mode 100644
index 0000000..79dc812
--- /dev/null
+++ b/src/cpu/CpuTensor.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/cpu/CpuTensor.h"
+
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuTensor::CpuTensor(IContext *ctx, const AclTensorDescriptor &desc)
+    : ITensorV2(ctx), _legacy_tensor()
+{
+    ARM_COMPUTE_ASSERT((ctx != nullptr) && (ctx->type() == Target::Cpu));
+    _legacy_tensor = std::make_unique<Tensor>();
+    _legacy_tensor->allocator()->init(arm_compute::detail::convert_to_legacy_tensor_info(desc));
+}
+
+void *CpuTensor::map()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    if(_legacy_tensor == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[CpuTensor:map]: Backing tensor does not exist!");
+        return nullptr;
+    }
+    return _legacy_tensor->buffer();
+}
+
+StatusCode CpuTensor::allocate()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    _legacy_tensor->allocator()->allocate();
+    return StatusCode::Success;
+}
+
+StatusCode CpuTensor::unmap()
+{
+    // No-op
+    return StatusCode::Success;
+}
+
+StatusCode CpuTensor::import(void *handle, ImportMemoryType type)
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+    ARM_COMPUTE_UNUSED(type);
+
+    const auto st = _legacy_tensor->allocator()->import_memory(handle);
+    return bool(st) ? StatusCode::Success : StatusCode::RuntimeError;
+}
+
+arm_compute::ITensor *CpuTensor::tensor()
+{
+    return _legacy_tensor.get();
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/cpu/CpuTensor.h b/src/cpu/CpuTensor.h
new file mode 100644
index 0000000..a46f1a2
--- /dev/null
+++ b/src/cpu/CpuTensor.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_CPU_CPUTENSOR_H
+#define SRC_CPU_CPUTENSOR_H
+
+#include "src/common/ITensor.h"
+
+#include "arm_compute/runtime/Tensor.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** CPU tensor implementation class */
+class CpuTensor final : public ITensorV2
+{
+public:
+    /**  Construct a new Cpu Tensor object
+     *
+     * @param[in] ctx  Context to be used
+     * @param[in] desc Tensor descriptor
+     */
+    CpuTensor(IContext *ctx, const AclTensorDescriptor &desc);
+    /** Allocates tensor
+     *
+     * @return StatusCode A status code
+     */
+    StatusCode allocate();
+
+    // Inherited functions overridden
+    void                 *map() override;
+    StatusCode            unmap() override;
+    arm_compute::ITensor *tensor() override;
+    StatusCode import(void *handle, ImportMemoryType type) override;
+
+private:
+    std::unique_ptr<Tensor> _legacy_tensor;
+};
+} // namespace cpu
+} // namespace arm_compute
+
+#endif /* SRC_CPU_CPUTENSOR_H */
\ No newline at end of file
diff --git a/src/gpu/cl/ClContext.cpp b/src/gpu/cl/ClContext.cpp
index 2bd8b8d..2e04e1d 100644
--- a/src/gpu/cl/ClContext.cpp
+++ b/src/gpu/cl/ClContext.cpp
@@ -23,6 +23,8 @@
  */
 #include "src/gpu/cl/ClContext.h"
 
+#include "src/gpu/cl/ClTensor.h"
+
 namespace arm_compute
 {
 namespace gpu
@@ -33,8 +35,13 @@
 {
 mlgo::MLGOHeuristics populate_mlgo(const char *filename)
 {
+    bool                 status = false;
     mlgo::MLGOHeuristics heuristics;
-    bool                 status = heuristics.reload_from_file(filename);
+
+    if(filename != nullptr)
+    {
+        status = heuristics.reload_from_file(filename);
+    }
     return status ? std::move(heuristics) : mlgo::MLGOHeuristics();
 }
 } // namespace
@@ -69,6 +76,16 @@
     }
     return false;
 }
+
+ITensorV2 *ClContext::create_tensor(const AclTensorDescriptor &desc, bool allocate)
+{
+    ClTensor *tensor = new ClTensor(this, desc);
+    if(tensor != nullptr && allocate)
+    {
+        tensor->allocate();
+    }
+    return tensor;
+}
 } // namespace opencl
 } // namespace gpu
 } // namespace arm_compute
diff --git a/src/gpu/cl/ClContext.h b/src/gpu/cl/ClContext.h
index e3f16b1..dd6699a 100644
--- a/src/gpu/cl/ClContext.h
+++ b/src/gpu/cl/ClContext.h
@@ -21,8 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifndef SRC_GPU_CL_CPUCONTEXT_H
-#define SRC_GPU_CL_CPUCONTEXT_H
+#ifndef SRC_GPU_CLCONTEXT_H
+#define SRC_GPU_CLCONTEXT_H
 
 #include "src/common/IContext.h"
 #include "src/runtime/CL/mlgo/MLGOHeuristics.h"
@@ -65,6 +65,9 @@
      */
     bool set_cl_ctx(::cl::Context ctx);
 
+    // Inherited methods overridden
+    ITensorV2 *create_tensor(const AclTensorDescriptor &desc, bool allocate) override;
+
 private:
     mlgo::MLGOHeuristics _mlgo_heuristics;
     ::cl::Context        _cl_context;
@@ -73,4 +76,4 @@
 } // namespace gpu
 } // namespace arm_compute
 
-#endif /* SRC_GPU_CL_CPUCONTEXT_H */
\ No newline at end of file
+#endif /* SRC_GPU_CLCONTEXT_H */
\ No newline at end of file
diff --git a/src/gpu/cl/ClTensor.cpp b/src/gpu/cl/ClTensor.cpp
new file mode 100644
index 0000000..db2081c
--- /dev/null
+++ b/src/gpu/cl/ClTensor.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/gpu/cl/ClTensor.h"
+
+#include "src/common/utils/LegacySupport.h"
+
+namespace arm_compute
+{
+namespace gpu
+{
+namespace opencl
+{
+ClTensor::ClTensor(IContext *ctx, const AclTensorDescriptor &desc)
+    : ITensorV2(ctx), _legacy_tensor()
+{
+    ARM_COMPUTE_ASSERT((ctx != nullptr) && (ctx->type() == Target::GpuOcl));
+    _legacy_tensor = std::make_unique<CLTensor>();
+    _legacy_tensor->allocator()->init(arm_compute::detail::convert_to_legacy_tensor_info(desc));
+}
+
+void *ClTensor::map()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    if(_legacy_tensor == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[ClTensor:map]: Backing tensor does not exist!");
+        return nullptr;
+    }
+
+    _legacy_tensor->map();
+    return _legacy_tensor->buffer();
+}
+
+StatusCode ClTensor::unmap()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    if(_legacy_tensor == nullptr)
+    {
+        ARM_COMPUTE_LOG_ERROR_ACL("[ClTensor:unmap]: Backing tensor does not exist!");
+        return StatusCode::RuntimeError;
+    }
+    _legacy_tensor->unmap();
+
+    return StatusCode::Success;
+}
+
+StatusCode ClTensor::allocate()
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+
+    _legacy_tensor->allocator()->allocate();
+    return StatusCode::Success;
+}
+
+StatusCode ClTensor::import(void *handle, ImportMemoryType type)
+{
+    ARM_COMPUTE_ASSERT(_legacy_tensor.get() != nullptr);
+    ARM_COMPUTE_UNUSED(type, handle);
+
+    return StatusCode::Success;
+}
+
+arm_compute::ITensor *ClTensor::tensor()
+{
+    return _legacy_tensor.get();
+}
+} // namespace opencl
+} // namespace gpu
+} // namespace arm_compute
diff --git a/src/gpu/cl/ClTensor.h b/src/gpu/cl/ClTensor.h
new file mode 100644
index 0000000..4188f62
--- /dev/null
+++ b/src/gpu/cl/ClTensor.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_GPU_CLTENSOR_H
+#define SRC_GPU_CLTENSOR_H
+
+#include "src/common/ITensor.h"
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+
+namespace arm_compute
+{
+namespace gpu
+{
+namespace opencl
+{
+/** OpenCL tensor implementation class */
+class ClTensor final : public ITensorV2
+{
+public:
+    /**  Construct a new OpenCL Tensor object
+     *
+     * @param[in] ctx  Context to be used
+     * @param[in] desc Tensor descriptor
+     */
+    ClTensor(IContext *ctx, const AclTensorDescriptor &desc);
+    /** Allocates tensor
+     *
+     * @return StatusCode A status code
+     */
+    StatusCode allocate();
+
+    // Inherited functions overridden
+    void                 *map() override;
+    StatusCode            unmap() override;
+    arm_compute::ITensor *tensor() override;
+    StatusCode import(void *handle, ImportMemoryType type) override;
+
+private:
+    std::unique_ptr<CLTensor> _legacy_tensor;
+};
+} // namespace opencl
+} // namespace gpu
+} // namespace arm_compute
+
+#endif /* SRC_GPU_CLTENSOR_H */
\ No newline at end of file