Port DepthConvert to new API

- Renames DepthConvert to Cast
- Ports both NEDepthConvertLayer and CLDepthConvertLayer variants
- Removes legacy shift capability from DepthConvert, allowing only
shifts of 0

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I806a0f8eb23d23502b632c529fda7edde19c8176
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5565
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/CL/CLKernels.h b/src/core/CL/CLKernels.h
index 1302d52..c59eeba 100644
--- a/src/core/CL/CLKernels.h
+++ b/src/core/CL/CLKernels.h
@@ -35,7 +35,6 @@
 #include "src/core/CL/kernels/CLComparisonKernel.h"
 #include "src/core/CL/kernels/CLDeconvolutionLayerUpsampleKernel.h"
 #include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLDepthToSpaceLayerKernel.h"
 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NCHWKernel.h"
 #include "src/core/CL/kernels/CLDepthwiseConvolutionLayer3x3NHWCKernel.h"
diff --git a/src/core/CL/cl_kernels/depth_convert.cl b/src/core/CL/cl_kernels/cast.cl
similarity index 93%
rename from src/core/CL/cl_kernels/depth_convert.cl
rename to src/core/CL/cl_kernels/cast.cl
index a888d7b..036a683 100644
--- a/src/core/CL/cl_kernels/depth_convert.cl
+++ b/src/core/CL/cl_kernels/cast.cl
@@ -31,7 +31,7 @@
 
 #define CONVERT_UP(x, type) CONVERT(x, type)
 
-/** This function performs a down-scaling depth conversion.
+/** This function performs a down-casting
  *
  * @attention For QSYMM8_PER_CHANNEL -> QASYMM8, it is user's responsibility to keep track of the quantization info.
  *
@@ -56,12 +56,10 @@
  * @param[in]  out_stride_z                      Stride of the source tensor in Z dimension (in bytes)
  * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem(in bytes)
  * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in]  shift                             The integer shift amount value. Supported data types: S32
  */
-__kernel void convert_depth_down(
+__kernel void cast_down(
     TENSOR3D_DECLARATION(in),
-    TENSOR3D_DECLARATION(out),
-    const int shift)
+    TENSOR3D_DECLARATION(out))
 {
     int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
 
@@ -82,12 +80,12 @@
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #else  /* defined(IS_DATA_TYPE_FLOAT) */
     VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
-    res0 = CONVERT_DOWN(in_data >> shift, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
+    res0 = CONVERT_DOWN(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #endif /* defined(IS_DATA_TYPE_FLOAT) */
 }
 
-/** This function performs a up-scaling depth conversion.
+/** This function performs an up-casting
  *
  * @note The input and output data_types need to be passed at compile time using -DDATA_TYPE_IN and -DDATA_TYPE_OUT:
  * e.g. -DDATA_TYPE_IN=uchar -DDATA_TYPE_OUT=short
@@ -110,12 +108,10 @@
  * @param[in]  out_stride_z                      Stride of the source tensor in Z dimension (in bytes)
  * @param[in]  out_step_z                        out_stride_z * number of elements along Z processed per workitem(in bytes)
  * @param[in]  out_offset_first_element_in_bytes The offset of the first element in the destination image
- * @param[in]  shift                             The integer shift amount value. Supported data types: S32
  */
-__kernel void convert_depth_up(
+__kernel void cast_up(
     TENSOR3D_DECLARATION(in),
-    TENSOR3D_DECLARATION(out),
-    const int shift)
+    TENSOR3D_DECLARATION(out))
 {
     int x_offs = max((int)(get_global_id(0) * VEC_SIZE - (VEC_SIZE - VEC_SIZE_LEFTOVER) % VEC_SIZE), 0);
 
@@ -132,7 +128,7 @@
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #else  /* defined(IS_DATA_TYPE_FLOAT) */
     VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)
-    res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE)) << shift;
+    res0 = CONVERT_UP(in_data, VEC_DATA_TYPE(DATA_TYPE_OUT, VEC_SIZE));
     STORE_VECTOR_SELECT(res, DATA_TYPE_OUT, out_addr, VEC_SIZE, VEC_SIZE_LEFTOVER, VEC_SIZE_LEFTOVER != 0 && get_global_id(0) == 0)
 #endif /* defined(IS_DATA_TYPE_FLOAT) */
 }
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp b/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
deleted file mode 100644
index 0d5c7a4..0000000
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
-
-#include "arm_compute/core/CL/CLHelpers.h"
-#include "arm_compute/core/CL/CLKernelLibrary.h"
-#include "arm_compute/core/CL/ICLTensor.h"
-#include "arm_compute/core/CL/OpenCL.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Utils.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CL/CLValidate.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/StringSupport.h"
-
-#include <cstddef>
-#include <set>
-#include <string>
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON(input == output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input,
-                                                         1,
-                                                         DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
-                                                         DataType::U16, DataType::U32, DataType::S32, DataType::F16,
-                                                         DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output,
-                                                         1,
-                                                         DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
-                                                         DataType::U16, DataType::U32, DataType::S32, DataType::F16,
-                                                         DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == output->data_type(), "Input and output data types must be different");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_float(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(input->data_type()) && shift != 0, "Shift is used only with integer non-quantized inputs");
-    ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
-
-    // Validate in case of configured output
-    if(output->total_size() > 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
-
-void CLDepthConvertLayerKernel::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
-{
-    configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
-}
-
-void CLDepthConvertLayerKernel::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    _input  = input;
-    _output = output;
-
-    // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
-
-    auto padding_info = get_padding_info({ input, output });
-
-    // Get data sizes
-    const size_t input_size  = data_size_from_type(input->info()->data_type());
-    const size_t output_size = data_size_from_type(output->info()->data_type());
-
-    // Get number of elements to process per iterations
-    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / input->info()->element_size(), input->info()->dimension(0));
-
-    // Set build options
-    CLBuildOptions build_opts;
-    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
-    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(input->info()->dimension(0) % num_elems_processed_per_iteration));
-    build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(input->info()->data_type()));
-    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(output->info()->data_type()));
-    // Conversions from float always SATURATE as out-of-bounds conversion from float->integer is implementation defined
-    build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
-    build_opts.add_option_if(is_data_type_float(input->info()->data_type()) || is_data_type_float(output->info()->data_type()), "-DIS_DATA_TYPE_FLOAT");
-    build_opts.add_option_if(is_data_type_quantized(input->info()->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
-
-    // Create kernel
-    const std::string kernel_name = (input_size >= output_size) ? "convert_depth_down" : "convert_depth_up";
-    _kernel                       = create_kernel(compile_context, kernel_name, build_opts.options());
-
-    // Set shift arg
-    unsigned int idx = 2 * num_arguments_per_3D_tensor(); // Skip the input and output parameters
-    _kernel.setArg(idx++, shift);
-
-    // Configure kernel
-    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
-    ICLKernel::configure_internal(win);
-
-    // Collapse window
-    const Window &full_window      = window();
-    Window        collapsed_window = full_window.collapse_if_possible(full_window, Window::DimZ);
-    ICLKernel::configure_internal(collapsed_window);
-
-    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
-
-    // Set config_id for enabling LWS tuning
-    _config_id = kernel_name;
-    _config_id += "_";
-    _config_id += lower_string(string_from_data_type(input->info()->data_type()));
-    _config_id += "_";
-    _config_id += support::cpp11::to_string(input->info()->dimension(0));
-    _config_id += "_";
-    _config_id += support::cpp11::to_string(input->info()->dimension(1));
-    _config_id += "_";
-    _config_id += support::cpp11::to_string(output->info()->dimension(0));
-    _config_id += "_";
-    _config_id += support::cpp11::to_string(output->info()->dimension(1));
-}
-
-Status CLDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
-
-    return Status{};
-}
-} // namespace arm_compute
diff --git a/src/core/CL/kernels/CLDepthConvertLayerKernel.h b/src/core/CL/kernels/CLDepthConvertLayerKernel.h
deleted file mode 100644
index 8b511c6..0000000
--- a/src/core/CL/kernels/CLDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/CL/ICLSimple3DKernel.h"
-
-#include <cstdint>
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the depth conversion kernel. */
-class CLDepthConvertLayerKernel : public ICLSimple3DKernel
-{
-public:
-    /** Set the input and output of the kernel.
-     *
-     * Valid conversions Input -> Output :
-     *
-     *   - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
-     *   - U8  -> S8, U16, S16, U32, S32, F16, F32
-     *   - U16 -> U8, S8, S16, U32, S32, F16, F32
-     *   - S16 -> U8, S8, U16, U32, S32, F16, F32
-     *   - U32 -> U8, S8, U16, S16, S32, F16, F32
-     *   - S32 -> U8, S8, U16, S16, U32, F16, F32
-     *   - F16 -> U8, S8, U16, S16, U32, F32
-     *   - F32 -> U8, S8, U16, S16, U32, F16
-     *
-     * @param[in]  input  The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
-     * @param[out] output The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
-     * @param[in]  policy Conversion policy
-     * @param[in]  shift  Value for down/up conversions. Must be 0 <= shift < 8.
-     */
-    void configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
-    /** Set the input and output of the kernel.
-     *
-     * Valid conversions Input -> Output :
-     *
-     *   - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
-     *   - U8  -> S8, U16, S16, U32, S32, F16, F32
-     *   - U16 -> U8, S8, S16, U32, S32, F16, F32
-     *   - S16 -> U8, S8, U16, U32, S32, F16, F32
-     *   - U32 -> U8, S8, U16, S16, S32, F16, F32
-     *   - S32 -> U8, S8, U16, S16, U32, F16, F32
-     *   - F16 -> U8, S8, U16, S16, U32, F32
-     *   - F32 -> U8, S8, U16, S16, U32, F16
-     *
-     * @param[in]  compile_context The compile context to be used.
-     * @param[in]  input           The input tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
-     * @param[out] output          The output tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
-     * @param[in]  policy          Conversion policy
-     * @param[in]  shift           Value for down/up conversions. Must be 0 <= shift < 8.
-     */
-    void configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLDepthConvertLayerKernel
-     *
-     * @param[in] input  Source tensor info. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
-     * @param[in] output Destination tensor info. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
-     * @param[in] policy Conversion policy
-     * @param[in] shift  Value for down/up conversions. Must be 0 <= shift < 8.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift);
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_CLDEPTHCONVERTKERNEL_H */
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index b11e135..ea15f4e 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -35,7 +35,6 @@
 #include "src/core/NEON/kernels/NECol2ImKernel.h"
 #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
 #include "src/core/NEON/kernels/NECropKernel.h"
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
 #include "src/core/NEON/kernels/NEDepthToSpaceLayerKernel.h"
 #include "src/core/NEON/kernels/NEFFTDigitReverseKernel.h"
 #include "src/core/NEON/kernels/NEFFTRadixStageKernel.h"
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
deleted file mode 100644
index 4b5208e..0000000
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
+++ /dev/null
@@ -1,1410 +0,0 @@
-/*
- * Copyright (c) 2016-2021 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/NEON/NEFixedPoint.h"
-#include "src/core/NEON/NEMath.h"
-#include "src/core/NEON/wrapper/wrapper.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-#include "support/SaturateCast.h"
-
-using namespace arm_compute;
-
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(output);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(output);
-    ARM_COMPUTE_UNUSED(policy);
-    ARM_COMPUTE_RETURN_ERROR_ON(input == output);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
-                                                         DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
-                                                         DataType::F32, DataType::S32);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
-                                                         DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
-                                                         DataType::U32, DataType::S32, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON(shift >= 8);
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8_SIGNED && (output->data_type() != DataType::S16 && output->data_type() != DataType::S32
-                                                                                       && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
-                                    "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::QASYMM8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16
-                                                                                && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
-                                    "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U8 && (output->data_type() != DataType::S16 && output->data_type() != DataType::U16
-                                                                           && output->data_type() != DataType::S32 && output->data_type() != DataType::F16 && output->data_type() != DataType::F32),
-                                    "Only data_types supported [in] U8 -> [out] U16, S16, S32, F16, F32");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::U16 && (output->data_type() != DataType::U8 && output->data_type() != DataType::U32),
-                                    "Only data_types supported [in] U16 ->  [out] U8, U32");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::U8 && output->data_type() != DataType::S32),
-                                    "Only data_types supported [in] S16 ->  [out] U8, S32");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::BFLOAT16 && output->data_type() != DataType::F32,
-                                    "Only data_types supported [in] BFLOAT16 ->  [out] F32");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F16 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
-                                                                            && output->data_type() != DataType::U8
-                                                                            && output->data_type() != DataType::F32 && output->data_type() != DataType::S32),
-                                    "Only data_types supported [in] F16 ->  [out] QASYMM8, F32, S32, U8");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::F32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
-                                                                            && output->data_type() != DataType::F16 && output->data_type() != DataType::BFLOAT16
-                                                                            && output->data_type() != DataType::S32 && output->data_type() != DataType::U8),
-                                    "Only data_types supported [in] F32 ->  [out] QASYMM8, BFLOAT16, F16, S32, U8");
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_type() == DataType::S32 && (output->data_type() != DataType::QASYMM8_SIGNED && output->data_type() != DataType::QASYMM8
-                                                                            && output->data_type() != DataType::F16
-                                                                            && output->data_type() != DataType::F32 && output->data_type() != DataType::U8),
-                                    "Only data_types supported [in] S32 ->  [out] QASYMM8, F16, F32, U8");
-
-    // Validate in case of configured output
-    if(output->total_size() > 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
-
-NEDepthConvertLayerKernel::NEDepthConvertLayerKernel()
-    : _input(nullptr), _output(nullptr), _policy(), _shift(0)
-{
-}
-
-void NEDepthConvertLayerKernel::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-
-    // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    _input  = input;
-    _output = output;
-    _policy = policy;
-    _shift  = shift;
-
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
-
-    // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps());
-
-    ICPPKernel::configure(win);
-}
-
-Status NEDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
-    return Status{};
-}
-
-void NEDepthConvertLayerKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(_input, _output);
-    ARM_COMPUTE_ERROR_ON(_input == _output);
-
-    const auto window_start_x = static_cast<int>(window.x().start());
-    const auto window_end_x   = static_cast<int>(window.x().end());
-    const int  window_step_x  = 16;
-
-    Window win{ window };
-    win.set(Window::DimX, Window::Dimension(0, 1, 1));
-
-    Iterator input(_input, win);
-    Iterator output(_output, win);
-
-    switch(_input->info()->data_type())
-    {
-        case DataType::QASYMM8_SIGNED:
-        {
-            const int16x8_t b = vdupq_n_s16(_shift);
-
-            switch(_output->info()->data_type())
-            {
-                case DataType::S16:
-                {
-                    /* Up-conversion QASYMM8_SIGNED -> S16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
-                        int        x          = window_start_x;
-
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                                }
-                            };
-
-                            vst1q_s16(output_ptr + x, texels.val[0]);
-                            vst1q_s16(output_ptr + x + 8, texels.val[1]);
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<int16_t>(*(input_ptr + x) << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::S32:
-                {
-                    /* Up-conversion QASYMM8_SIGNED -> S32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
-                        int        x          = window_start_x;
-
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                                }
-                            };
-
-                            vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
-                            vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::F32:
-                {
-                    /* Up-conversion QASYMM8_SIGNED -> F32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                                }
-                            };
-                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
-                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                case DataType::F16:
-                {
-                    /* Up-conversion QASYMM8_SIGNED -> F16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-                        int        x          = window_start_x;
-
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                                }
-                            };
-                            vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
-                            vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-        }
-
-        case DataType::QASYMM8:
-        case DataType::U8:
-        {
-            const int16x8_t b = vdupq_n_s16(_shift);
-
-            switch(_output->info()->data_type())
-            {
-                case DataType::S16:
-                {
-                    /* Up-conversion U8 -> S16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                                }
-                            };
-
-                            vst1q_s16(output_ptr + x, texels.val[0]);
-                            vst1q_s16(output_ptr + x + 8, texels.val[1]);
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            auto in           = static_cast<int32_t>(*(input_ptr + x));
-                            *(output_ptr + x) = in << _shift;
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::S32:
-                {
-                    /* Up-conversion U8 -> S32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                                }
-                            };
-
-                            vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
-                            vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
-                            vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            auto in           = static_cast<uint32_t>(*(input_ptr + x));
-                            *(output_ptr + x) = in << _shift;
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::F32:
-                {
-                    /* Up-conversion U8 -> F32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                                }
-                            };
-                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
-                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
-                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            auto in           = static_cast<uint32_t>(*(input_ptr + x));
-                            *(output_ptr + x) = static_cast<float>(in << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                case DataType::F16:
-                {
-                    /* Up-conversion U8 -> F16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
-
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                                }
-                            };
-                            vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
-                            vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                case DataType::U16:
-                {
-                    /* Up-conversion U8 -> U16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint16_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
-
-                            const uint16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_u16(vmovl_u8(vget_low_u8(texels_u8)), b),
-                                    vshlq_u16(vmovl_u8(vget_high_u8(texels_u8)), b)
-                                }
-                            };
-
-                            vst1q_u16(output_ptr + x, texels.val[0]);
-                            vst1q_u16(output_ptr + x + 8, texels.val[1]);
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<uint16_t>(*(input_ptr + x)) << _shift;
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-        }
-        case DataType::S16:
-        {
-            switch(_output->info()->data_type())
-            {
-                case DataType::QASYMM8_SIGNED:
-                {
-                    const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
-                    /* Down-conversion S16 -> QASYMM8_SIGNED */
-                    if(ConvertPolicy::SATURATE == _policy)
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int16x8x2_t texels =
-                                {
-                                    {
-                                        vqshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
-                                    }
-                                };
-
-                                vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    else
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int16x8x2_t texels =
-                                {
-                                    {
-                                        vshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
-                                    }
-                                };
-
-                                vst1q_s8(output_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    break;
-                }
-                case DataType::U8:
-                {
-                    const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
-                    /* Down-conversion S16 -> U8 */
-                    if(ConvertPolicy::SATURATE == _policy)
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int16x8x2_t texels =
-                                {
-                                    {
-                                        vqshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
-                                    }
-                                };
-
-                                vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    else
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int16x8x2_t texels =
-                                {
-                                    {
-                                        vshlq_s16(vld1q_s16(input_ptr + x), b),
-                                        vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
-                                    }
-                                };
-
-                                vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
-                                                                     vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    break;
-                }
-                case DataType::S32:
-                {
-                    const int32x4_t b = vdupq_n_s32(_shift);
-
-                    /* Up-conversion S16 -> S32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vld1q_s16(input_ptr + x),
-                                    vld1q_s16(input_ptr + x + 8)
-                                }
-                            };
-
-                            const int32x4x4_t texels_s32 =
-                            {
-                                {
-                                    vshlq_s32(vmovl_s16(vget_low_s16(texels.val[0])), b),
-                                    vshlq_s32(vmovl_s16(vget_high_s16(texels.val[0])), b),
-                                    vshlq_s32(vmovl_s16(vget_low_s16(texels.val[1])), b),
-                                    vshlq_s32(vmovl_s16(vget_high_s16(texels.val[1])), b)
-                                }
-                            };
-
-                            vst1q_s32(output_ptr + x, texels_s32.val[0]);
-                            vst1q_s32(output_ptr + x + 4, texels_s32.val[1]);
-                            vst1q_s32(output_ptr + x + 8, texels_s32.val[2]);
-                            vst1q_s32(output_ptr + x + 12, texels_s32.val[3]);
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-        }
-        case DataType::U16:
-        {
-            switch(_output->info()->data_type())
-            {
-                case DataType::U8:
-                {
-                    const int16x8_t b = vdupq_n_s16(-static_cast<int16_t>(_shift));
-
-                    /* Down-conversion U16 -> U8 */
-                    if(ConvertPolicy::SATURATE == _policy)
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const uint16x8x2_t texels =
-                                {
-                                    {
-                                        vqshlq_u16(vld1q_u16(input_ptr + x), b),
-                                        vqshlq_u16(vld1q_u16(input_ptr + x + 8), b)
-                                    }
-                                };
-
-                                vst1q_u8(output_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    else
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const uint16x8x2_t texels =
-                                {
-                                    {
-                                        vshlq_u16(vld1q_u16(input_ptr + x), b),
-                                        vshlq_u16(vld1q_u16(input_ptr + x + 8), b)
-                                    }
-                                };
-
-                                vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
-                            }
-
-                        },
-                        input, output);
-                    }
-                    break;
-                }
-                case DataType::U32:
-                {
-                    const int32x4_t b = vdupq_n_s32(_shift);
-
-                    /* Up-conversion U16 -> U32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint32_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint16x8x2_t texels =
-                            {
-                                {
-                                    vld1q_u16(input_ptr + x),
-                                    vld1q_u16(input_ptr + x + 8)
-                                }
-                            };
-
-                            vst1q_u32(output_ptr + x, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[0])), b));
-                            vst1q_u32(output_ptr + x + 4, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[0])), b));
-                            vst1q_u32(output_ptr + x + 8, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[1])), b));
-                            vst1q_u32(output_ptr + x + 12, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[1])), b));
-                        }
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<uint32_t>(*(input_ptr + x) << _shift);
-                        }
-
-                    },
-                    input, output);
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-        }
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
-        case DataType::BFLOAT16:
-            switch(_output->info()->data_type())
-            {
-                case DataType::F32:
-                {
-                    /* Up-conversion BFLOAT16 -> F32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const bfloat16 *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const uint16x8x2_t texels =
-                            {
-                                {
-                                    vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr())),
-                                    vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr()) + 8)
-                                }
-                            };
-
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()),
-                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16)));
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4,
-                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16)));
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8,
-                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16)));
-                            vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12,
-                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16)));
-                        }
-
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = float(*(input_ptr + x));
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type unsupported");
-            }
-            break;
-#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-        case DataType::F16:
-            switch(_output->info()->data_type())
-            {
-                case DataType::QASYMM8_SIGNED:
-                {
-                    const float16_t   scale_s = 1 << _shift;
-                    const float16x8_t scale   = vdupq_n_f16(scale_s);
-
-                    /* Down-conversion F16 -> QASYMM8_SIGNED (Always saturating) */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float16x8x2_t texels =
-                            {
-                                {
-                                    vmulq_f16(vld1q_f16(input_ptr + x), scale),
-                                    vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
-                                }
-                            };
-
-                            vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::QASYMM8:
-                case DataType::U8:
-                {
-                    const float16_t   scale_s = 1 << _shift;
-                    const float16x8_t scale   = vdupq_n_f16(scale_s);
-
-                    /* Down-conversion F16 -> QASYMM8/U8 (Always saturating) */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float16x8x2_t texels =
-                            {
-                                {
-                                    vmulq_f16(vld1q_f16(input_ptr + x), scale),
-                                    vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
-                                }
-                            };
-
-                            vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s);
-                        }
-
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::F32:
-                {
-                    const float       scale_s = 1 << _shift;
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Up-conversion F16 -> F32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float16x8x2_t texels =
-                            {
-                                {
-                                    vld1q_f16(input_ptr + x),
-                                    vld1q_f16(input_ptr + x + 8)
-                                }
-                            };
-                            vst1q_f32(output_ptr + x, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale));
-                            vst1q_f32(output_ptr + x + 4, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale));
-                            vst1q_f32(output_ptr + x + 8, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale));
-                            vst1q_f32(output_ptr + x + 12, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::S32:
-                {
-                    const float       scale_s = 1 << _shift;
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Up-conversion F16 -> S32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float16x8x2_t texels =
-                            {
-                                {
-                                    vld1q_f16(input_ptr + x),
-                                    vld1q_f16(input_ptr + x + 8)
-                                }
-                            };
-
-                            vst1q_s32(output_ptr + x, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale)));
-                            vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale)));
-                            vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale)));
-                            vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale)));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-        case DataType::F32:
-            switch(_output->info()->data_type())
-            {
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                case DataType::F16:
-                {
-                    const float       scale_s = 1.f / (1 << _shift);
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Down-conversion F32 -> F16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float32x4x4_t texels =
-                            {
-                                {
-                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale)
-                                }
-                            };
-
-                            vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
-                            vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
-                case DataType::BFLOAT16:
-                {
-                    /* Down-conversion F32 -> BFLOAT16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<bfloat16 *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(input.ptr()),
-                                                   reinterpret_cast<uint16_t *>(output.ptr()));
-                            wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(input.ptr()) + 8,
-                                                   reinterpret_cast<uint16_t *>(output.ptr()) + 8);
-                        }
-
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = *(input_ptr + x);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
-                case DataType::S32:
-                {
-                    const float       scale_s = 1.f / (1 << _shift);
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Conversion F32 -> S32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float32x4x4_t texels =
-                            {
-                                {
-                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
-                                }
-                            };
-
-                            vst1q_s32(output_ptr + x, vcvtq_s32_f32(texels.val[0]));
-                            vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(texels.val[1]));
-                            vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(texels.val[2]));
-                            vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(texels.val[3]));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::QASYMM8:
-                case DataType::U8:
-                {
-                    const float       scale_s = 1.f / (1 << _shift);
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Down-conversion F32 -> U8 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float32x4x4_t texels =
-                            {
-                                {
-                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
-                                }
-                            };
-
-                            vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1])))));
-                            vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3])))));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::QASYMM8_SIGNED:
-                {
-                    const float       scale_s = 1.f / (1 << _shift);
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Down-conversion F32 -> QASYMM8_SIGNED */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float32x4x4_t texels =
-                            {
-                                {
-                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
-                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
-                                }
-                            };
-
-                            vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1])))));
-                            vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3])))));
-                        }
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-
-        case DataType::S32:
-            switch(_output->info()->data_type())
-            {
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-                case DataType::F16:
-                {
-                    const float       scale_s = 1.f / (1 << _shift);
-                    const float32x4_t scale   = vdupq_n_f32(scale_s);
-
-                    /* Down-conversion S32 -> F16 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const float32x4x4_t texels =
-                            {
-                                {
-                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x)), scale),
-                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 4)), scale),
-                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 8)), scale),
-                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 12)), scale)
-                                }
-                            };
-
-                            vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
-                            vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
-                case DataType::F32:
-                {
-                    const int       scale_s = 1.f / (1 << _shift);
-                    const int32x4_t scale   = vdupq_n_s32(scale_s);
-
-                    /* Conversion S32 -> F32 */
-                    execute_window_loop(win, [&](const Coordinates &)
-                    {
-                        const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
-                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
-
-                        int x = window_start_x;
-                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                        {
-                            const int32x4x4_t texels =
-                            {
-                                {
-                                    vmulq_s32(vld1q_s32(input_ptr + x), scale),
-                                    vmulq_s32(vld1q_s32(input_ptr + x + 4), scale),
-                                    vmulq_s32(vld1q_s32(input_ptr + x + 8), scale),
-                                    vmulq_s32(vld1q_s32(input_ptr + x + 12), scale),
-                                }
-                            };
-
-                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(texels.val[0]));
-                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(texels.val[1]));
-                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(texels.val[2]));
-                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(texels.val[3]));
-                        }
-
-                        // Compute left-over elements
-                        for(; x < window_end_x; ++x)
-                        {
-                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
-                        }
-                    },
-                    input, output);
-                    break;
-                }
-                case DataType::QASYMM8_SIGNED:
-                {
-                    const int32x4_t b = vdupq_n_s32(-static_cast<int32_t>(_shift));
-
-                    /* Down-conversion S32 -> QASYMM8_SIGNED */
-                    if(ConvertPolicy::SATURATE == _policy)
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int32x4x4_t texels =
-                                {
-                                    {
-                                        vqshlq_s32(vld1q_s32(input_ptr + x), b),
-                                        vqshlq_s32(vld1q_s32(input_ptr + x + 4), b),
-                                        vqshlq_s32(vld1q_s32(input_ptr + x + 8), b),
-                                        vqshlq_s32(vld1q_s32(input_ptr + x + 12), b)
-                                    }
-                                };
-                                vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1]))));
-                                vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3]))));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    else
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int32x4x4_t texels =
-                                {
-                                    {
-                                        vshlq_s32(vld1q_s32(input_ptr + x), b),
-                                        vshlq_s32(vld1q_s32(input_ptr + x + 4), b),
-                                        vshlq_s32(vld1q_s32(input_ptr + x + 8), b),
-                                        vshlq_s32(vld1q_s32(input_ptr + x + 12), b)
-                                    }
-                                };
-
-                                vst1_s8(output_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1]))));
-                                vst1_s8(output_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3]))));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    break;
-                }
-                case DataType::QASYMM8:
-                case DataType::U8:
-                {
-                    const int32x4_t b = vdupq_n_s32(-static_cast<int32_t>(_shift));
-
-                    /* Down-conversion S32 -> U8 */
-                    if(ConvertPolicy::SATURATE == _policy)
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int32x4x4_t texels =
-                                {
-                                    {
-                                        vqshlq_s32(vld1q_s32(input_ptr + x), b),
-                                        vqshlq_s32(vld1q_s32(input_ptr + x + 4), b),
-                                        vqshlq_s32(vld1q_s32(input_ptr + x + 8), b),
-                                        vqshlq_s32(vld1q_s32(input_ptr + x + 12), b)
-                                    }
-                                };
-                                vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1]))));
-                                vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3]))));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    else
-                    {
-                        execute_window_loop(win, [&](const Coordinates &)
-                        {
-                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
-                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
-
-                            int x = window_start_x;
-                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
-                            {
-                                const int32x4x4_t texels =
-                                {
-                                    {
-                                        vshlq_s32(vld1q_s32(input_ptr + x), b),
-                                        vshlq_s32(vld1q_s32(input_ptr + x + 4), b),
-                                        vshlq_s32(vld1q_s32(input_ptr + x + 8), b),
-                                        vshlq_s32(vld1q_s32(input_ptr + x + 12), b)
-                                    }
-                                };
-
-                                vst1_u8(output_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1])))));
-                                vst1_u8(output_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3])))));
-                            }
-
-                            // Compute left-over elements
-                            for(; x < window_end_x; ++x)
-                            {
-                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
-                            }
-                        },
-                        input, output);
-                    }
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Not supported");
-    }
-}
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h b/src/core/NEON/kernels/NEDepthConvertLayerKernel.h
deleted file mode 100644
index 30fe1ed..0000000
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2016-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_DEPTHCONVERTKERNEL_H
-#define ARM_COMPUTE_DEPTHCONVERTKERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** Depth conversion kernel
- *  This function ignores the scale and zeroPoint of quanized tensors, i.e. QASYMM8 input is treated as uint8 values.
- */
-class NEDepthConvertLayerKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEDepthConvertLayerKernel";
-    }
-    /** Default constructor*/
-    NEDepthConvertLayerKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEDepthConvertLayerKernel(const NEDepthConvertLayerKernel &) = delete;
-    /** Default move constructor */
-    NEDepthConvertLayerKernel(NEDepthConvertLayerKernel &&) = default;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEDepthConvertLayerKernel &operator=(const NEDepthConvertLayerKernel &) = delete;
-    /** Default move assignment operator */
-    NEDepthConvertLayerKernel &operator=(NEDepthConvertLayerKernel &&) = default;
-    /** Default destructor */
-    ~NEDepthConvertLayerKernel() = default;
-    /** Set the input and output of the kernel
-     *
-     * Valid conversions Input -> Output :
-     *
-     *   - QASYMM8_SIGNED -> S16, S32, F32, F16
-     *   - QASYMM8        -> U16, S16, S32, F32, F16
-     *   - U8             -> U16, S16, S32, F32, F16
-     *   - U16            -> U8, U32
-     *   - S16            -> QASYMM8_SIGNED, U8, S32
-     *   - BFLOAT16       -> F32
-     *   - F16            -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8
-     *   - S32            -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8
-     *   - F32            -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
-     *
-     * @param[in]  input  The input tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
-     * @param[out] output The output tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
-     * @param[in]  policy Conversion policy.
-     * @param[in]  shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
-     */
-    void configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
-    /** Static function to check if given info will lead to a valid configuration of @ref NEDepthConvertLayerKernel
-     *
-     * @param[in] input  Source tensor info. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
-     * @param[in] output Destination tensor info. Data type supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
-     * @param[in] policy Conversion policy
-     * @param[in] shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift = 0);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    const ITensor *_input;
-    ITensor       *_output;
-    ConvertPolicy  _policy;
-    uint32_t       _shift;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEDEPTHCONVERTKERNEL_H */
diff --git a/src/core/cpu/kernels/CpuCastKernel.cpp b/src/core/cpu/kernels/CpuCastKernel.cpp
new file mode 100644
index 0000000..46f3c330
--- /dev/null
+++ b/src/core/cpu/kernels/CpuCastKernel.cpp
@@ -0,0 +1,1367 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuCastKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/NEFixedPoint.h"
+#include "src/core/NEON/NEMath.h"
+#include "src/core/NEON/wrapper/wrapper.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+#include "support/SaturateCast.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+/** Validate the supported src/dst data-type combinations for the cast kernel. */
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON_CPU_BF16_UNSUPPORTED(dst);
+    ARM_COMPUTE_UNUSED(policy);
+    ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
+                                                         DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
+                                                         DataType::F32, DataType::S32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8_SIGNED, DataType::QASYMM8, DataType::U8,
+                                                         DataType::S16, DataType::U16, DataType::BFLOAT16, DataType::F16,
+                                                         DataType::U32, DataType::S32, DataType::F32);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8_SIGNED && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::S32
+                                                                                     && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
+                                    "Only data_types supported [in] QASYMM8_SIGNED -> [out] S16, S32, F16, F32");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::QASYMM8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16
+                                                                              && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
+                                    "Only data_types supported [in] QASYMM8 -> [out] U16, S16, S32, F16, F32");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U8 && (dst->data_type() != DataType::S16 && dst->data_type() != DataType::U16
+                                                                         && dst->data_type() != DataType::S32 && dst->data_type() != DataType::F16 && dst->data_type() != DataType::F32),
+                                    "Only data_types supported [in] U8 -> [out] U16, S16, S32, F16, F32");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::U16 && (dst->data_type() != DataType::U8 && dst->data_type() != DataType::U32),
+                                    "Only data_types supported [in] U16 ->  [out] U8, U32");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::U8 && dst->data_type() != DataType::S32),
+                                    "Only data_types supported [in] S16 ->  [out] QASYMM8_SIGNED, U8, S32");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::BFLOAT16 && dst->data_type() != DataType::F32,
+                                    "Only data_types supported [in] BFLOAT16 ->  [out] F32");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F16 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
+                                                                          && dst->data_type() != DataType::U8
+                                                                          && dst->data_type() != DataType::F32 && dst->data_type() != DataType::S32),
+                                    "Only data_types supported [in] F16 ->  [out] QASYMM8_SIGNED, QASYMM8, F32, S32, U8");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::F32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
+                                                                          && dst->data_type() != DataType::F16 && dst->data_type() != DataType::BFLOAT16
+                                                                          && dst->data_type() != DataType::S32 && dst->data_type() != DataType::U8),
+                                    "Only data_types supported [in] F32 ->  [out] QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == DataType::S32 && (dst->data_type() != DataType::QASYMM8_SIGNED && dst->data_type() != DataType::QASYMM8
+                                                                          && dst->data_type() != DataType::F16
+                                                                          && dst->data_type() != DataType::F32 && dst->data_type() != DataType::U8),
+                                    "Only data_types supported [in] S32 ->  [out] QASYMM8_SIGNED, QASYMM8, F16, F32, U8");
+
+    // Validate in case of configured dst
+    if(dst->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+    }
+
+    return Status{};
+}
+} // namespace
+
+void CpuCastKernel::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // Only the shape can be auto-initialized here; the destination data type must be set by the caller
+    set_shape_if_empty(*dst, src->tensor_shape());
+
+    _policy = policy;
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
+
+    // Kernel window covers the whole tensor; per-element stepping is handled inside run_op
+    ICPPKernel::configure(calculate_max_window(*src, Steps()));
+}
+
+Status CpuCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    // Static validation simply forwards to the shared argument checker
+    return validate_arguments(src, dst, policy);
+}
+
+void CpuCastKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+    const int  window_step_x  = 16;
+
+    const ITensor *_src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    ITensor       *_dst = tensors.get_tensor(TensorType::ACL_DST);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+    ARM_COMPUTE_ERROR_ON(_src == _dst);
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_src, _dst);
+
+    Window win{ window };
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator src(_src, win);
+    Iterator dst(_dst, win);
+
+    switch(_src->info()->data_type())
+    {
+        case DataType::QASYMM8_SIGNED:
+        {
+            switch(_dst->info()->data_type())
+            {
+                case DataType::S16:
+                {
+                    /* Up-conversion QASYMM8_SIGNED -> S16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int16_t *>(dst.ptr());
+                        int        x       = window_start_x;
+
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
+                                }
+                            };
+
+                            vst1q_s16(dst_ptr + x, texels.val[0]);
+                            vst1q_s16(dst_ptr + x + 8, texels.val[1]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::S32:
+                {
+                    /* Up-conversion QASYMM8_SIGNED -> S32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+                        int        x       = window_start_x;
+
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
+                                }
+                            };
+
+                            vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+                            vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::F32:
+                {
+                    /* Up-conversion QASYMM8_SIGNED -> F32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            // Load at the current x offset (previously reloaded the row base every iteration)
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
+                                }
+                            };
+                            vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+                            vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                {
+                    /* Up-conversion QASYMM8_SIGNED -> F16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
+                        int        x       = window_start_x;
+
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const int8x16_t texels_s8 = vld1q_s8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vmovl_s8(vget_low_s8(texels_s8)),
+                                    vmovl_s8(vget_high_s8(texels_s8))
+                                }
+                            };
+                            vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0]));
+                            vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+        }
+
+        case DataType::QASYMM8:
+        case DataType::U8:
+        {
+            switch(_dst->info()->data_type())
+            {
+                case DataType::S16:
+                {
+                    /* Up-conversion U8 -> S16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int16_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
+                                }
+                            };
+
+                            vst1q_s16(dst_ptr + x, texels.val[0]);
+                            vst1q_s16(dst_ptr + x + 8, texels.val[1]);
+                        }
+
+                        // Compute left-over elements (cast matches the int16_t destination type)
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::S32:
+                {
+                    /* Up-conversion U8 -> S32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
+                                }
+                            };
+
+                            vst1q_s32(dst_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+                            vst1q_s32(dst_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+                            vst1q_s32(dst_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                        }
+
+                        // Compute left-over elements (cast matches the int32_t destination type)
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::F32:
+                {
+                    /* Up-conversion U8 -> F32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
+                                }
+                            };
+                            vst1q_f32(dst_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+                            vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+                            vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements (cast matches the float destination type)
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                {
+                    /* Up-conversion U8 -> F16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
+
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))),
+                                    vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8)))
+                                }
+                            };
+                            vst1q_f16(dst_ptr + x, vcvtq_f16_s16(texels.val[0]));
+                            vst1q_f16(dst_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::U16:
+                {
+                    /* Up-conversion U8 -> U16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const uint8_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint16_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint8x16_t texels_u8 = vld1q_u8(src_ptr + x);
+
+                            const uint16x8x2_t texels =
+                            {
+                                {
+                                    vmovl_u8(vget_low_u8(texels_u8)),
+                                    vmovl_u8(vget_high_u8(texels_u8))
+                                }
+                            };
+
+                            vst1q_u16(dst_ptr + x, texels.val[0]);
+                            vst1q_u16(dst_ptr + x + 8, texels.val[1]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<uint16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+        }
+        case DataType::S16:
+        {
+            switch(_dst->info()->data_type())
+            {
+                case DataType::QASYMM8_SIGNED:
+                {
+                    /* Down-conversion S16 -> QASYMM8_SIGNED */
+                    if(ConvertPolicy::SATURATE == _policy)
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
+                                    }
+                                };
+
+                                vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    else
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
+                                    }
+                                };
+
+                                vst1q_s8(dst_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = static_cast<int8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    break;
+                }
+                case DataType::U8:
+                {
+                    /* Down-conversion S16 -> U8 */
+                    if(ConvertPolicy::SATURATE == _policy)
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
+                                    }
+                                };
+
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    else
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vld1q_s16(src_ptr + x),
+                                        vld1q_s16(src_ptr + x + 8)
+                                    }
+                                };
+
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
+                                                                  vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    break;
+                }
+                case DataType::S32:
+                {
+                    /* Up-conversion S16 -> S32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_s16(src_ptr + x),
+                                    vld1q_s16(src_ptr + x + 8)
+                                }
+                            };
+
+                            const int32x4x4_t texels_s32 =
+                            {
+                                {
+                                    vmovl_s16(vget_low_s16(texels.val[0])),
+                                    vmovl_s16(vget_high_s16(texels.val[0])),
+                                    vmovl_s16(vget_low_s16(texels.val[1])),
+                                    vmovl_s16(vget_high_s16(texels.val[1]))
+                                }
+                            };
+
+                            vst1q_s32(dst_ptr + x, texels_s32.val[0]);
+                            vst1q_s32(dst_ptr + x + 4, texels_s32.val[1]);
+                            vst1q_s32(dst_ptr + x + 8, texels_s32.val[2]);
+                            vst1q_s32(dst_ptr + x + 12, texels_s32.val[3]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+        }
+        case DataType::U16:
+        {
+            switch(_dst->info()->data_type())
+            {
+                case DataType::U8:
+                {
+                    /* Down-conversion U16 -> U8 */
+                    if(ConvertPolicy::SATURATE == _policy)
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const uint16x8x2_t texels =
+                                {
+                                    {
+                                        vld1q_u16(src_ptr + x),
+                                        vld1q_u16(src_ptr + x + 8)
+                                    }
+                                };
+
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    else
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const uint16x8x2_t texels =
+                                {
+                                    {
+                                        vld1q_u16(src_ptr + x),
+                                        vld1q_u16(src_ptr + x + 8)
+                                    }
+                                };
+
+                                vst1q_u8(dst_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
+                            }
+
+                        },
+                        src, dst);
+                    }
+                    break;
+                }
+                case DataType::U32:
+                {
+                    /* Up-conversion U16 -> U32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const uint16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint32_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_u16(src_ptr + x),
+                                    vld1q_u16(src_ptr + x + 8)
+                                }
+                            };
+
+                            vst1q_u32(dst_ptr + x, vmovl_u16(vget_low_u16(texels.val[0])));
+                            vst1q_u32(dst_ptr + x + 4, vmovl_u16(vget_high_u16(texels.val[0])));
+                            vst1q_u32(dst_ptr + x + 8, vmovl_u16(vget_low_u16(texels.val[1])));
+                            vst1q_u32(dst_ptr + x + 12, vmovl_u16(vget_high_u16(texels.val[1])));
+                        }
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<uint32_t>(*(src_ptr + x));
+                        }
+
+                    },
+                    src, dst);
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+        }
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
+        case DataType::BFLOAT16:
+            switch(_dst->info()->data_type())
+            {
+                case DataType::F32:
+                {
+                    /* Up-conversion BFLOAT16 -> F32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const bfloat16 *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr())),
+                                    vld1q_u16(reinterpret_cast<uint16_t *>(src.ptr()) + 8)
+                                }
+                            };
+
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()),
+                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[0])), 16)));
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 4,
+                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[0])), 16)));
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 8,
+                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_low_u16(texels.val[1])), 16)));
+                            vst1q_f32(reinterpret_cast<float *>(dst.ptr()) + 12,
+                                      vreinterpretq_f32_u32(vshlq_n_u32(vmovl_u16(vget_high_u16(texels.val[1])), 16)));
+                        }
+
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = float(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("dst data type unsupported");
+            }
+            break;
+#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+        case DataType::F16:
+            switch(_dst->info()->data_type())
+            {
+                case DataType::QASYMM8_SIGNED:
+                {
+                    /* Down-conversion F16 -> QASYMM8_SIGNED (Always saturating) */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8),
+                                }
+                            };
+
+                            vst1q_s8(dst_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::QASYMM8:
+                case DataType::U8:
+                {
+                    /* Down-conversion F16 -> QASYMM8/U8 (Always saturating) */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8),
+                                }
+                            };
+
+                            vst1q_u8(dst_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
+                        }
+
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::F32:
+                {
+                    /* Up-conversion F16 -> F32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8)
+                                }
+                            };
+                            vst1q_f32(dst_ptr + x, vcvt_f32_f16(vget_low_f16(texels.val[0])));
+                            vst1q_f32(dst_ptr + x + 4, vcvt_f32_f16(vget_high_f16(texels.val[0])));
+                            vst1q_f32(dst_ptr + x + 8, vcvt_f32_f16(vget_low_f16(texels.val[1])));
+                            vst1q_f32(dst_ptr + x + 12, vcvt_f32_f16(vget_high_f16(texels.val[1])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::S32:
+                {
+                    /* Up-conversion F16 -> S32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float16_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_f16(src_ptr + x),
+                                    vld1q_f16(src_ptr + x + 8)
+                                }
+                            };
+
+                            vst1q_s32(dst_ptr + x, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[0]))));
+                            vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[0]))));
+                            vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(vcvt_f32_f16(vget_low_f16(texels.val[1]))));
+                            vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(vcvt_f32_f16(vget_high_f16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+        case DataType::F32:
+            switch(_dst->info()->data_type())
+            {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                {
+                    /* Down-conversion F32 -> F16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vld1q_f32(src_ptr + x),
+                                    vld1q_f32(src_ptr + x + 4),
+                                    vld1q_f32(src_ptr + x + 8),
+                                    vld1q_f32(src_ptr + x + 12)
+                                }
+                            };
+
+                            vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
+                            vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16)
+                case DataType::BFLOAT16:
+                {
+                    /* Down-conversion F32 -> BFLOAT16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<bfloat16 *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()),
+                                                   reinterpret_cast<uint16_t *>(dst.ptr()));
+                            wrapper::vcvt_bf16_f32(reinterpret_cast<float *>(src.ptr()) + 8,
+                                                   reinterpret_cast<uint16_t *>(dst.ptr()) + 8);
+                        }
+
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = *(src_ptr + x);
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#endif /* defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) || defined(ARM_COMPUTE_FORCE_BF16) */
+                case DataType::S32:
+                {
+                    /* Conversion F32 -> S32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int32_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vld1q_f32(src_ptr + x),
+                                    vld1q_f32(src_ptr + x + 4),
+                                    vld1q_f32(src_ptr + x + 8),
+                                    vld1q_f32(src_ptr + x + 12),
+                                }
+                            };
+
+                            vst1q_s32(dst_ptr + x, vcvtq_s32_f32(texels.val[0]));
+                            vst1q_s32(dst_ptr + x + 4, vcvtq_s32_f32(texels.val[1]));
+                            vst1q_s32(dst_ptr + x + 8, vcvtq_s32_f32(texels.val[2]));
+                            vst1q_s32(dst_ptr + x + 12, vcvtq_s32_f32(texels.val[3]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<int32_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::QASYMM8:
+                case DataType::U8:
+                {
+                    /* Down-conversion F32 -> U8 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vld1q_f32(src_ptr + x),
+                                    vld1q_f32(src_ptr + x + 4),
+                                    vld1q_f32(src_ptr + x + 8),
+                                    vld1q_f32(src_ptr + x + 12),
+                                }
+                            };
+
+                            vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1])))));
+                            vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3])))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::QASYMM8_SIGNED:
+                {
+                    /* Down-conversion F32 -> QASYMM8_SIGNED */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const float *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vld1q_f32(src_ptr + x),
+                                    vld1q_f32(src_ptr + x + 4),
+                                    vld1q_f32(src_ptr + x + 8),
+                                    vld1q_f32(src_ptr + x + 12),
+                                }
+                            };
+
+                            vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1])))));
+                            vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3])))));
+                        }
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+
+        case DataType::S32:
+            switch(_dst->info()->data_type())
+            {
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+                case DataType::F16:
+                {
+                    /* Down-conversion S32 -> F16 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float16_t *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vcvtq_f32_s32(vld1q_s32(src_ptr + x)),
+                                    vcvtq_f32_s32(vld1q_s32(src_ptr + x + 4)),
+                                    vcvtq_f32_s32(vld1q_s32(src_ptr + x + 8)),
+                                    vcvtq_f32_s32(vld1q_s32(src_ptr + x + 12))
+                                }
+                            };
+
+                            vst1q_f16(dst_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
+                            vst1q_f16(dst_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float16_t>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+                case DataType::F32:
+                {
+                    /* Conversion S32 -> F32 */
+                    execute_window_loop(win, [&](const Coordinates &)
+                    {
+                        const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+                        const auto dst_ptr = reinterpret_cast<float *>(dst.ptr());
+
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const int32x4x4_t texels =
+                            {
+                                {
+                                    vld1q_s32(src_ptr + x),
+                                    vld1q_s32(src_ptr + x + 4),
+                                    vld1q_s32(src_ptr + x + 8),
+                                    vld1q_s32(src_ptr + x + 12),
+                                }
+                            };
+
+                            vst1q_f32(dst_ptr + x, vcvtq_f32_s32(texels.val[0]));
+                            vst1q_f32(dst_ptr + x + 4, vcvtq_f32_s32(texels.val[1]));
+                            vst1q_f32(dst_ptr + x + 8, vcvtq_f32_s32(texels.val[2]));
+                            vst1q_f32(dst_ptr + x + 12, vcvtq_f32_s32(texels.val[3]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(dst_ptr + x) = static_cast<float>(*(src_ptr + x));
+                        }
+                    },
+                    src, dst);
+                    break;
+                }
+                case DataType::QASYMM8_SIGNED:
+                {
+                    /* Down-conversion S32 -> QASYMM8_SIGNED */
+                    if(ConvertPolicy::SATURATE == _policy)
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int32x4x4_t texels =
+                                {
+                                    {
+                                        vld1q_s32(src_ptr + x),
+                                        vld1q_s32(src_ptr + x + 4),
+                                        vld1q_s32(src_ptr + x + 8),
+                                        vld1q_s32(src_ptr + x + 12),
+                                    }
+                                };
+                                vst1_s8(dst_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1]))));
+                                vst1_s8(dst_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = utils::cast::saturate_cast<int8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    else
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<int8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int32x4x4_t texels =
+                                {
+                                    {
+                                        vld1q_s32(src_ptr + x),
+                                        vld1q_s32(src_ptr + x + 4),
+                                        vld1q_s32(src_ptr + x + 8),
+                                        vld1q_s32(src_ptr + x + 12)
+                                    }
+                                };
+
+                                vst1_s8(dst_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1]))));
+                                vst1_s8(dst_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = static_cast<int8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    break;
+                }
+                case DataType::QASYMM8:
+                case DataType::U8:
+                {
+                    /* Down-conversion S32 -> U8 */
+                    if(ConvertPolicy::SATURATE == _policy)
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int32x4x4_t texels =
+                                {
+                                    {
+                                        vld1q_s32(src_ptr + x),
+                                        vld1q_s32(src_ptr + x + 4),
+                                        vld1q_s32(src_ptr + x + 8),
+                                        vld1q_s32(src_ptr + x + 12)
+                                    }
+                                };
+                                vst1_u8(dst_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1]))));
+                                vst1_u8(dst_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    else
+                    {
+                        execute_window_loop(win, [&](const Coordinates &)
+                        {
+                            const auto src_ptr = reinterpret_cast<const int32_t *>(src.ptr());
+                            const auto dst_ptr = reinterpret_cast<uint8_t *>(dst.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int32x4x4_t texels =
+                                {
+                                    {
+                                        vld1q_s32(src_ptr + x),
+                                        vld1q_s32(src_ptr + x + 4),
+                                        vld1q_s32(src_ptr + x + 8),
+                                        vld1q_s32(src_ptr + x + 12)
+                                    }
+                                };
+
+                                vst1_u8(dst_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1])))));
+                                vst1_u8(dst_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3])))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(dst_ptr + x) = static_cast<uint8_t>(*(src_ptr + x));
+                            }
+                        },
+                        src, dst);
+                    }
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("dst data type not supported");
+            }
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+    }
+}
+
+/** Return the unique kernel identifier used by profiling/logging infrastructure.
+ *
+ * NOTE(review): the returned identifier carries a ".cpp" suffix; confirm this
+ * matches the naming convention of the other Cpu*Kernel::name() implementations.
+ */
+const char *CpuCastKernel::name() const
+{
+    static constexpr const char *kernel_name = "CpuCastKernel.cpp";
+    return kernel_name;
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuCastKernel.h b/src/core/cpu/kernels/CpuCastKernel.h
new file mode 100644
index 0000000..2a75c58
--- /dev/null
+++ b/src/core/cpu/kernels/CpuCastKernel.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_CAST_KERNEL_H
+#define ARM_COMPUTE_CPU_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zero point are ignored
+ */
+class CpuCastKernel : public ICpuKernel
+{
+public:
+    CpuCastKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuCastKernel);
+    /** Set the src and dst of the kernel
+     *
+     * Valid conversions src -> dst :
+     *
+     *   - QASYMM8_SIGNED -> S16, S32, F32, F16
+     *   - QASYMM8        -> U16, S16, S32, F32, F16
+     *   - U8             -> U16, S16, S32, F32, F16
+     *   - U16            -> U8, U32
+     *   - S16            -> QASYMM8_SIGNED, U8, S32
+     *   - BFLOAT16       -> F32
+     *   - F16            -> QASYMM8_SIGNED, QASYMM8, F32, S32, U8
+     *   - S32            -> QASYMM8_SIGNED, QASYMM8, F16, F32, U8
+     *   - F32            -> QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8
+     *
+     * @param[in]  src    The src tensor to convert. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/BFLOAT16/F16/F32.
+     * @param[out] dst    The dst tensor. Data types supported: QASYMM8_SIGNED/QASYMM8/U8/U16/S16/U32/S32/BFLOAT16/F16/F32.
+     * @param[in]  policy Conversion policy.
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref CpuCastKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    ConvertPolicy _policy{ ConvertPolicy::SATURATE };
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_CAST_KERNEL_H */
diff --git a/src/core/gpu/cl/ClKernelLibrary.cpp b/src/core/gpu/cl/ClKernelLibrary.cpp
index 286ed4c..b0458d7 100644
--- a/src/core/gpu/cl/ClKernelLibrary.cpp
+++ b/src/core/gpu/cl/ClKernelLibrary.cpp
@@ -216,8 +216,8 @@
     { "concatenate_width_x2", "concatenate.cl" },
     { "concatenate_width_x4", "concatenate.cl" },
     { "col2im", "col2im.cl" },
-    { "convert_depth_down", "depth_convert.cl" },
-    { "convert_depth_up", "depth_convert.cl" },
+    { "cast_down", "cast.cl" },
+    { "cast_up", "cast.cl" },
     { "convert_fc_weights", "convert_fc_weights.cl" },
     { "copy_tensor", "copy_tensor.cl" },
     { "crop_tensor", "crop_tensor.cl" },
@@ -565,8 +565,8 @@
 #include "./cl_kernels/deconvolution_layer.clembed"
     },
     {
-        "depth_convert.cl",
-#include "./cl_kernels/depth_convert.clembed"
+        "cast.cl",
+#include "./cl_kernels/cast.clembed"
     },
     {
         "depth_to_space.cl",
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.cpp b/src/core/gpu/cl/kernels/ClCastKernel.cpp
new file mode 100644
index 0000000..7a1d5c2
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClCastKernel.cpp
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/CLValidate.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include "support/Cast.h"
+#include "support/StringSupport.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_UNUSED(policy);
+    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(src);
+    ARM_COMPUTE_RETURN_ERROR_ON(src == dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src,
+                                                         1,
+                                                         DataType::U8, DataType::S8, DataType::QSYMM8_PER_CHANNEL, DataType::S16,
+                                                         DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+                                                         DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst,
+                                                         1,
+                                                         DataType::U8, DataType::S8, DataType::QASYMM8, DataType::S16,
+                                                         DataType::U16, DataType::U32, DataType::S32, DataType::F16,
+                                                         DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src->data_type() == dst->data_type(), "src and dst data types must be different");
+
+    // Validate in case of configured dst
+    if(dst->total_size() > 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, dst);
+    }
+
+    return Status{};
+}
+} // namespace
+
+void ClCastKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    // Auto-initialize the dst shape if not yet set (only the shape can be auto-configured; the data type must be given)
+    set_shape_if_empty(*dst, src->tensor_shape());
+
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, policy));
+
+    auto padding_info = get_padding_info({ src, dst });
+
+    // Get data sizes
+    const size_t src_size = data_size_from_type(src->data_type());
+    const size_t dst_size = data_size_from_type(dst->data_type());
+
+    // Get number of elements to process per iterations
+    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(16 / src->element_size(), src->dimension(0));
+
+    // Set build options
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(src->dimension(0) % num_elems_processed_per_iteration));
+    build_opts.add_option("-DDATA_TYPE_IN=" + get_cl_type_from_data_type(src->data_type()));
+    build_opts.add_option("-DDATA_TYPE_OUT=" + get_cl_type_from_data_type(dst->data_type()));
+    // Conversions from float always SATURATE, as out-of-bounds float->integer conversion is implementation-defined
+    build_opts.add_option_if(is_data_type_float(src->data_type()) || policy == ConvertPolicy::SATURATE, "-DSATURATE");
+    build_opts.add_option_if(is_data_type_float(src->data_type()) || is_data_type_float(dst->data_type()), "-DIS_DATA_TYPE_FLOAT");
+    build_opts.add_option_if(is_data_type_quantized(src->data_type()), "-DIS_DATA_TYPE_QUANTIZED");
+
+    // Create kernel
+    const std::string kernel_name = (src_size >= dst_size) ? "cast_down" : "cast_up";
+    _kernel                       = create_kernel(compile_context, kernel_name, build_opts.options());
+
+    // Configure kernel
+    Window win = calculate_max_window(*src, Steps(num_elems_processed_per_iteration));
+    ICLKernel::configure_internal(win);
+
+    // Collapse window
+    const Window &full_window      = window();
+    Window        collapsed_window = full_window.collapse_if_possible(full_window, Window::DimZ);
+    ICLKernel::configure_internal(collapsed_window);
+
+    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+
+    // Set config_id for enabling LWS tuning
+    _config_id = kernel_name;
+    _config_id += "_";
+    _config_id += lower_string(string_from_data_type(src->data_type()));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(src->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(src->dimension(1));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(dst->dimension(0));
+    _config_id += "_";
+    _config_id += support::cpp11::to_string(dst->dimension(1));
+}
+
+Status ClCastKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, policy));
+    return Status{};
+}
+
+void ClCastKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
+    const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
+    auto       dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+
+    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+    Window slice     = collapsed.first_slice_window_3D();
+
+    do
+    {
+        unsigned int idx = 0;
+        add_3D_tensor_argument(idx, src, slice);
+        add_3D_tensor_argument(idx, dst, slice);
+        enqueue(queue, *this, slice, lws_hint());
+    }
+    while(collapsed.slide_window_slice_3D(slice));
+}
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/core/gpu/cl/kernels/ClCastKernel.h b/src/core/gpu/cl/kernels/ClCastKernel.h
new file mode 100644
index 0000000..451aa9c
--- /dev/null
+++ b/src/core/gpu/cl/kernels/ClCastKernel.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_CAST_KERNEL_H
+#define ARM_COMPUTE_CL_CAST_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/IClKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+namespace kernels
+{
+/** Casts a given tensor to a new type
+ *
+ * @note When casting between quantized types the scale and zero point are ignored
+ */
+class ClCastKernel : public IClKernel
+{
+public:
+    ClCastKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(ClCastKernel);
+    /** Set the src and dst of the kernel.
+     *
+     * Valid conversions src -> dst :
+     *
+     *   - QSYMM8_PER_CHANNEL -> QASYMM8 (ATTENTION: it is the user's responsibility to keep track of the quantization info in the TensorInfo meta-data)
+     *   - U8  -> S8, U16, S16, U32, S32, F16, F32
+     *   - U16 -> U8, S8, S16, U32, S32, F16, F32
+     *   - S16 -> U8, S8, U16, U32, S32, F16, F32
+     *   - U32 -> U8, S8, U16, S16, S32, F16, F32
+     *   - S32 -> U8, S8, U16, S16, U32, F16, F32
+     *   - F16 -> U8, S8, U16, S16, U32, F32
+     *   - F32 -> U8, S8, U16, S16, U32, F16
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  src             The source tensor to convert. Data types supported: U8/S8/QSYMM8_PER_CHANNEL/U16/S16/U32/S32/F16/F32.
+     * @param[out] dst             The destination tensor. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
+     * @param[in]  policy          Conversion policy
+     */
+    void configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref ClCastKernel::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue) override;
+};
+} // namespace kernels
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_CAST_KERNEL_H */
diff --git a/src/graph/backends/CL/CLNodeValidator.cpp b/src/graph/backends/CL/CLNodeValidator.cpp
index 312cda3..8e3b4c8 100644
--- a/src/graph/backends/CL/CLNodeValidator.cpp
+++ b/src/graph/backends/CL/CLNodeValidator.cpp
@@ -28,11 +28,7 @@
 
 #include "arm_compute/runtime/CL/CLFunctions.h"
 #include "arm_compute/runtime/CPP/CPPFunctions.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
-#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLIm2ColKernel.h"
-#include "src/core/CL/kernels/CLQLSTMLayerNormalizationKernel.h"
-#include "src/core/CL/kernels/CLWeightsReshapeKernel.h"
+
 #include "support/Cast.h"
 
 using namespace arm_compute::utils::cast;
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index c030a64..a485e5d 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -28,17 +28,6 @@
 
 #include "arm_compute/runtime/CPP/CPPFunctions.h"
 #include "arm_compute/runtime/NEON/NEFunctions.h"
-#include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixAdditionKernel.h"
-#include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
-#include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
-#include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
-#include "src/core/NEON/kernels/NEWeightsReshapeKernel.h"
 #include "support/Cast.h"
 
 using namespace arm_compute::utils::cast;
diff --git a/src/runtime/CL/functions/CLCast.cpp b/src/runtime/CL/functions/CLCast.cpp
index 202140d..53256eb 100644
--- a/src/runtime/CL/functions/CLCast.cpp
+++ b/src/runtime/CL/functions/CLCast.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,12 +23,31 @@
  */
 #include "arm_compute/runtime/CL/functions/CLCast.h"
 
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClCast.h"
 
 #include <utility>
 
 namespace arm_compute
 {
+struct CLCast::Impl
+{
+    const ICLTensor                *src{ nullptr };
+    ICLTensor                      *dst{ nullptr };
+    std::unique_ptr<opencl::ClCast> op{ nullptr };
+};
+
+CLCast::CLCast()
+    : _impl(std::make_unique<Impl>())
+{
+}
+CLCast::CLCast(CLCast &&) = default;
+CLCast &CLCast::operator=(CLCast &&) = default;
+CLCast::~CLCast()                    = default;
+
 void CLCast::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy)
 {
     configure(CLKernelLibrary::get().get_compile_context(), input, output, policy);
@@ -36,13 +55,23 @@
 
 void CLCast::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy)
 {
-    auto k = std::make_unique<CLDepthConvertLayerKernel>();
-    k->configure(compile_context, input, output, policy, 0);
-    _kernel = std::move(k);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    _impl->src = input;
+    _impl->dst = output;
+
+    _impl->op = std::make_unique<opencl::ClCast>();
+    _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), policy);
 }
 
 Status CLCast::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy)
 {
-    return CLDepthConvertLayerKernel::validate(input, output, policy, 0);
+    return opencl::ClCast::validate(input, output, policy);
+}
+
+void CLCast::run()
+{
+    ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLDepthConvertLayer.cpp b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
index 47bc523..6aa370b 100644
--- a/src/runtime/CL/functions/CLDepthConvertLayer.cpp
+++ b/src/runtime/CL/functions/CLDepthConvertLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,12 +23,31 @@
  */
 #include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
 
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/CL/ICLKernel.h"
+#include "src/runtime/gpu/cl/operators/ClCast.h"
 
 #include <utility>
 
 namespace arm_compute
 {
+struct CLDepthConvertLayer::Impl
+{
+    const ICLTensor                *src{ nullptr };
+    ICLTensor                      *dst{ nullptr };
+    std::unique_ptr<opencl::ClCast> op{ nullptr };
+};
+
+CLDepthConvertLayer::CLDepthConvertLayer()
+    : _impl(std::make_unique<Impl>())
+{
+}
+CLDepthConvertLayer::CLDepthConvertLayer(CLDepthConvertLayer &&) = default;
+CLDepthConvertLayer &CLDepthConvertLayer::operator=(CLDepthConvertLayer &&) = default;
+CLDepthConvertLayer::~CLDepthConvertLayer()                                 = default;
+
 void CLDepthConvertLayer::configure(const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
 {
     configure(CLKernelLibrary::get().get_compile_context(), input, output, policy, shift);
@@ -36,13 +55,27 @@
 
 void CLDepthConvertLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, ICLTensor *output, ConvertPolicy policy, uint32_t shift)
 {
-    auto k = std::make_unique<CLDepthConvertLayerKernel>();
-    k->configure(compile_context, input, output, policy, shift);
-    _kernel = std::move(k);
+    ARM_COMPUTE_UNUSED(shift);
+
+    _impl->src = input;
+    _impl->dst = output;
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
+    ARM_COMPUTE_ERROR_ON(shift != 0);
+
+    _impl->op = std::make_unique<opencl::ClCast>();
+    _impl->op->configure(compile_context, _impl->src->info(), _impl->dst->info(), policy);
 }
 
 Status CLDepthConvertLayer::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
 {
-    return CLDepthConvertLayerKernel::validate(input, output, policy, shift);
+    ARM_COMPUTE_RETURN_ERROR_ON(shift != 0);
+    return opencl::ClCast::validate(input, output, policy);
+}
+
+void CLDepthConvertLayer::run()
+{
+    ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
diff --git a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
index 991472b..50a145f 100644
--- a/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
+++ b/src/runtime/CL/functions/CLFullyConnectedLayer.cpp
@@ -28,7 +28,6 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
index 5dc7556..3184d5d 100644
--- a/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMConvolutionLayer.cpp
@@ -31,7 +31,6 @@
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "src/core/CL/kernels/CLCol2ImKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
diff --git a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
index 7a01018..d5d1b5f 100644
--- a/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
+++ b/src/runtime/CL/functions/CLGEMMDeconvolutionLayer.cpp
@@ -29,7 +29,6 @@
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
index 099a2c9..3be0958 100644
--- a/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
+++ b/src/runtime/CL/functions/CLGEMMLowpMatrixMultiplyCore.cpp
@@ -34,12 +34,12 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
 #include "src/core/gpu/cl/kernels/ClGemmReshapeRhsMatrixKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/runtime/CL/gemm_auto_heuristics/CLGEMMAutoHeuristics.h"
@@ -189,7 +189,7 @@
 
 CLGEMMLowpMatrixMultiplyCore::CLGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager)
     : _memory_group(std::move(memory_manager)),
-      _weights_to_qasymm8(std::make_unique<CLDepthConvertLayerKernel>()),
+      _weights_to_qasymm8(std::make_unique<opencl::kernels::ClCastKernel>()),
       _mm_native_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyNativeKernel>()),
       _mm_reshaped_only_rhs_kernel(std::make_unique<CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel>()),
       _mtx_b_reshape_kernel(std::make_unique<opencl::kernels::ClGemmReshapeRhsMatrixKernel>()),
@@ -272,7 +272,7 @@
         TensorInfo weights_info(*b->info());
         weights_info.set_data_type(DataType::QASYMM8);
         _qasymm8_weights.allocator()->init(weights_info);
-        _weights_to_qasymm8->configure(compile_context, b, &_qasymm8_weights, ConvertPolicy::WRAP, 0);
+        _weights_to_qasymm8->configure(compile_context, b->info(), _qasymm8_weights.info(), ConvertPolicy::WRAP);
     }
 
     const ICLTensor *matrix_b = _convert_to_qasymm8 ? &_qasymm8_weights : b;
@@ -480,7 +480,7 @@
     {
         b_offset = -128;
         weights_info.set_data_type(DataType::QASYMM8);
-        ARM_COMPUTE_RETURN_ON_ERROR(CLDepthConvertLayerKernel::validate(b, &weights_info, ConvertPolicy::WRAP, 0));
+        ARM_COMPUTE_RETURN_ON_ERROR(opencl::kernels::ClCastKernel::validate(b, &weights_info, ConvertPolicy::WRAP));
     }
     const ITensorInfo *matrix_b_info = &weights_info;
     if(reshape_matrix_b)
@@ -681,7 +681,8 @@
         if(_convert_to_qasymm8)
         {
             _qasymm8_weights.allocator()->allocate();
-            CLScheduler::get().enqueue(*_weights_to_qasymm8, false);
+            ITensorPack convert_to_qs8_pack = { { ACL_SRC, _original_b }, { ACL_DST, &_qasymm8_weights } };
+            CLScheduler::get().enqueue_op(*_weights_to_qasymm8, convert_to_qs8_pack, false);
         }
 
         if(_is_gemm_reshaped && _reshape_b_only_on_first_run)
diff --git a/src/runtime/CL/functions/CLLSTMLayer.cpp b/src/runtime/CL/functions/CLLSTMLayer.cpp
index 146ac8f..85d13c2 100644
--- a/src/runtime/CL/functions/CLLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayer.cpp
@@ -29,7 +29,6 @@
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp b/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
index 6997442..a44dcd2 100644
--- a/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
@@ -27,7 +27,6 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLQLSTMLayer.cpp b/src/runtime/CL/functions/CLQLSTMLayer.cpp
index 7b6ec8f..fcf5b9d 100644
--- a/src/runtime/CL/functions/CLQLSTMLayer.cpp
+++ b/src/runtime/CL/functions/CLQLSTMLayer.cpp
@@ -30,7 +30,6 @@
 #include "arm_compute/core/utils/misc/InfoHelpers.h"
 #include "arm_compute/core/utils/quantization/AsymmHelpers.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/CL/functions/CLRNNLayer.cpp b/src/runtime/CL/functions/CLRNNLayer.cpp
index 45ced35..755fa40 100644
--- a/src/runtime/CL/functions/CLRNNLayer.cpp
+++ b/src/runtime/CL/functions/CLRNNLayer.cpp
@@ -28,7 +28,6 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "src/core/CL/kernels/CLDepthConvertLayerKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
 #include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
diff --git a/src/runtime/NEON/functions/NECast.cpp b/src/runtime/NEON/functions/NECast.cpp
index a42f512..b519576 100644
--- a/src/runtime/NEON/functions/NECast.cpp
+++ b/src/runtime/NEON/functions/NECast.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,23 +23,45 @@
  */
 #include "arm_compute/runtime/NEON/functions/NECast.h"
 
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
-
-#include <utility>
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuCast.h"
 
 namespace arm_compute
 {
+struct NECast::Impl
+{
+    const ITensor                *src{ nullptr };
+    ITensor                      *dst{ nullptr };
+    std::unique_ptr<cpu::CpuCast> op{ nullptr };
+};
+
+NECast::NECast()
+    : _impl(std::make_unique<Impl>())
+{
+}
+NECast::NECast(NECast &&) = default;
+NECast &NECast::operator=(NECast &&) = default;
+NECast::~NECast()                    = default;
+
 void NECast::configure(ITensor *input, ITensor *output, ConvertPolicy policy)
 {
-    auto k = std::make_unique<NEDepthConvertLayerKernel>();
-    k->configure(input, output, policy, 0);
-    _kernel = std::move(k);
+    _impl->src = input;
+    _impl->dst = output;
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
+
+    _impl->op = std::make_unique<cpu::CpuCast>();
+    _impl->op->configure(_impl->src->info(), _impl->dst->info(), policy);
 }
 
 Status NECast::validate(ITensorInfo *input, ITensorInfo *output, ConvertPolicy policy)
 {
-    return NEDepthConvertLayerKernel::validate(input, output, policy, 0);
+    return cpu::CpuCast::validate(input, output, policy);
+}
+
+void NECast::run()
+{
+    ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEDepthConvertLayer.cpp b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
index 761de8e..07e985c 100644
--- a/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
+++ b/src/runtime/NEON/functions/NEDepthConvertLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020 Arm Limited.
+ * Copyright (c) 2016-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,20 +23,51 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h"
 
-#include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuCast.h"
 
 #include <utility>
 
-using namespace arm_compute;
+namespace arm_compute
+{
+struct NEDepthConvertLayer::Impl
+{
+    const ITensor                *src{ nullptr };
+    ITensor                      *dst{ nullptr };
+    std::unique_ptr<cpu::CpuCast> op{ nullptr };
+};
+
+NEDepthConvertLayer::NEDepthConvertLayer()
+    : _impl(std::make_unique<Impl>())
+{
+}
+NEDepthConvertLayer::NEDepthConvertLayer(NEDepthConvertLayer &&) = default;
+NEDepthConvertLayer &NEDepthConvertLayer::operator=(NEDepthConvertLayer &&) = default;
+NEDepthConvertLayer::~NEDepthConvertLayer()                                 = default;
 
 void NEDepthConvertLayer::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
 {
-    auto k = std::make_unique<NEDepthConvertLayerKernel>();
-    k->configure(input, output, policy, shift);
-    _kernel = std::move(k);
+    ARM_COMPUTE_UNUSED(shift);
+
+    _impl->src = input;
+    _impl->dst = output;
+
+    ARM_COMPUTE_ERROR_ON_NULLPTR(_impl->src, _impl->dst);
+    ARM_COMPUTE_ERROR_ON(shift != 0);
+
+    _impl->op = std::make_unique<cpu::CpuCast>();
+    _impl->op->configure(_impl->src->info(), _impl->dst->info(), policy);
 }
 
 Status NEDepthConvertLayer::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
 {
-    return NEDepthConvertLayerKernel::validate(input, output, policy, shift);
+    ARM_COMPUTE_RETURN_ERROR_ON(shift != 0);
+    return cpu::CpuCast::validate(input, output, policy);
 }
+
+void NEDepthConvertLayer::run()
+{
+    ITensorPack pack = { { ACL_SRC, _impl->src }, { ACL_DST, _impl->dst } };
+    _impl->op->run(pack);
+}
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuCast.cpp b/src/runtime/cpu/operators/CpuCast.cpp
new file mode 100644
index 0000000..5a4f6c5
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuCast.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuCast.h"
+
+#include "src/core/cpu/kernels/CpuCastKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuCast::configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+    auto k = std::make_unique<kernels::CpuCastKernel>();
+    k->configure(src, dst, policy);
+    _kernel = std::move(k);
+}
+
+Status CpuCast::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    return kernels::CpuCastKernel::validate(src, dst, policy);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuCast.h b/src/runtime/cpu/operators/CpuCast.h
new file mode 100644
index 0000000..2aea2d2
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuCast.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_CAST_H
+#define ARM_COMPUTE_CPU_CAST_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref kernels::CpuCastKernel */
+class CpuCast : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuCast() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * Input data type must be different than output data type.
+     *
+     * Valid data layouts:
+     * - All
+     *
+     * Valid data type configurations:
+     * |src            |dst                                             |
+     * |:--------------|:-----------------------------------------------|
+     * |QASYMM8_SIGNED | S16, S32, F32, F16                             |
+     * |QASYMM8        | U16, S16, S32, F32, F16                        |
+     * |U8             | U16, S16, S32, F32, F16                        |
+     * |U16            | U8, U32                                        |
+     * |S16            | QASYMM8_SIGNED, U8, S32                        |
+     * |F16            | QASYMM8_SIGNED, QASYMM8, F32, S32, U8          |
+     * |S32            | QASYMM8_SIGNED, QASYMM8, F16, F32, U8          |
+     * |F32            | QASYMM8_SIGNED, QASYMM8, BFLOAT16, F16, S32, U8|
+     *
+     * @param[in]  src    The source tensor to convert. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+     * @param[out] dst    The destination tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+     * @param[in]  policy Conversion policy.
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref CpuCast::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_CAST_H */
diff --git a/src/runtime/gpu/cl/operators/ClCast.cpp b/src/runtime/gpu/cl/operators/ClCast.cpp
new file mode 100644
index 0000000..3f54004
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClCast.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClCast.h"
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClCastKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+void ClCast::configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy)
+{
+    auto k = std::make_unique<kernels::ClCastKernel>();
+    k->configure(compile_context, src, dst, policy);
+    _kernel = std::move(k);
+}
+
+Status ClCast::validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy)
+{
+    return kernels::ClCastKernel::validate(src, dst, policy);
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClCast.h b/src/runtime/gpu/cl/operators/ClCast.h
new file mode 100644
index 0000000..69e028d
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClCast.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_CAST_H
+#define ARM_COMPUTE_CL_CAST_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to run @ref kernels::ClCastKernel */
+class ClCast : public IClOperator
+{
+public:
+    /** Constructor */
+    ClCast() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * @note Input data type must be different than output data type.
+     *
+     * Valid data layouts:
+     * - All
+     *
+     * Valid data type configurations:
+     * |src            |dst                                    |
+     * |:--------------|:--------------------------------------|
+     * |U8             | S8, U16, S16, U32, S32, F16, F32      |
+     * |U16            | U8, S8, S16, U32, S32, F16, F32       |
+     * |S16            | U8, S8, U16, U32, S32, F16, F32       |
+     * |U32            | U8, S8, U16, S16, S32, F16, F32       |
+     * |S32            | U8, S8, U16, S16, U32, F16, F32       |
+     * |F16            | U8, S8, U16, S16, U32, F32            |
+     * |F32            | U8, S8, U16, S16, U32, F16            |
+     *
+     * @param[in]  compile_context The compile context to be used.
+     * @param[in]  src             The source tensor to convert. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+     * @param[out] dst             The destination tensor. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
+     * @param[in]  policy          Conversion policy.
+     */
+    void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration
+     *
+     * Similar to @ref ClCast::configure()
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, ConvertPolicy policy);
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_CAST_H */