Make memset/copy functions stateless

Port the following functions:
- NECopy
- NEFill
- NEPermute
- NEReshapeLayer
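
Each ported function keeps no kernel state of its own: it holds an opaque Impl (the unique_ptr<Impl> member added in the headers below) that owns the matching Cpu* operator and the user tensors, and run() hands those tensors to the operator through an ITensorPack. A minimal sketch of that pattern for NECopy, assuming cpu::CpuCopy exposes configure(const ITensorInfo *, ITensorInfo *) and run(ITensorPack &) like the other ported operators (the operator sources are not part of this excerpt):

    struct NECopy::Impl
    {
        const ITensor                *src{ nullptr };
        ITensor                      *dst{ nullptr };
        std::unique_ptr<cpu::CpuCopy> op{ nullptr };
    };

    void NECopy::configure(const ITensor *input, ITensor *output)
    {
        _impl->src = input;
        _impl->dst = output;
        _impl->op  = std::make_unique<cpu::CpuCopy>();
        // Configure on tensor infos only; the kernel caches no ITensor pointers.
        _impl->op->configure(input->info(), output->info());
    }

    void NECopy::run()
    {
        // Tensors are supplied at run time via a pack rather than stored in the kernel.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, _impl->src);
        pack.add_tensor(TensorType::ACL_DST, _impl->dst);
        _impl->op->run(pack);
    }

In-place operators such as fill can address the single tensor through the new ACL_SRC_DST alias added to experimental/Types.h.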

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I75f3f837012abab79c7dde9a20a34f64f75571d8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4800
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/Android.bp b/Android.bp
index e686bdf..41ed188 100644
--- a/Android.bp
+++ b/Android.bp
@@ -244,7 +244,6 @@
         "src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.cpp",
         "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.cpp",
         "src/core/NEON/kernels/NEConvolutionKernel.cpp",
-        "src/core/NEON/kernels/NECopyKernel.cpp",
         "src/core/NEON/kernels/NECropKernel.cpp",
         "src/core/NEON/kernels/NECumulativeDistributionKernel.cpp",
         "src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp",
@@ -297,14 +296,12 @@
         "src/core/NEON/kernels/NEMeanStdDevKernel.cpp",
         "src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.cpp",
         "src/core/NEON/kernels/NEMedian3x3Kernel.cpp",
-        "src/core/NEON/kernels/NEMemsetKernel.cpp",
         "src/core/NEON/kernels/NEMinMaxLayerKernel.cpp",
         "src/core/NEON/kernels/NEMinMaxLocationKernel.cpp",
         "src/core/NEON/kernels/NENonLinearFilterKernel.cpp",
         "src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.cpp",
         "src/core/NEON/kernels/NENormalizationLayerKernel.cpp",
         "src/core/NEON/kernels/NEPadLayerKernel.cpp",
-        "src/core/NEON/kernels/NEPermuteKernel.cpp",
         "src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.cpp",
         "src/core/NEON/kernels/NEPoolingLayerKernel.cpp",
         "src/core/NEON/kernels/NEPriorBoxLayerKernel.cpp",
@@ -316,7 +313,6 @@
         "src/core/NEON/kernels/NEReductionOperationKernel.cpp",
         "src/core/NEON/kernels/NERemapKernel.cpp",
         "src/core/NEON/kernels/NEReorgLayerKernel.cpp",
-        "src/core/NEON/kernels/NEReshapeLayerKernel.cpp",
         "src/core/NEON/kernels/NEReverseKernel.cpp",
         "src/core/NEON/kernels/NEScaleKernel.cpp",
         "src/core/NEON/kernels/NEScharr3x3Kernel.cpp",
@@ -416,7 +412,11 @@
         "src/core/cpu/kernels/CpuConcatenateDepthKernel.cpp",
         "src/core/cpu/kernels/CpuConcatenateHeightKernel.cpp",
         "src/core/cpu/kernels/CpuConcatenateWidthKernel.cpp",
+        "src/core/cpu/kernels/CpuCopyKernel.cpp",
+        "src/core/cpu/kernels/CpuFillKernel.cpp",
         "src/core/cpu/kernels/CpuFloorKernel.cpp",
+        "src/core/cpu/kernels/CpuPermuteKernel.cpp",
+        "src/core/cpu/kernels/CpuReshapeKernel.cpp",
         "src/core/cpu/kernels/activation/NEON/fp16.cpp",
         "src/core/cpu/kernels/activation/NEON/fp32.cpp",
         "src/core/cpu/kernels/activation/NEON/qasymm8.cpp",
@@ -776,7 +776,11 @@
         "src/runtime/cpu/operators/CpuActivation.cpp",
         "src/runtime/cpu/operators/CpuAdd.cpp",
         "src/runtime/cpu/operators/CpuConcatenate.cpp",
+        "src/runtime/cpu/operators/CpuCopy.cpp",
+        "src/runtime/cpu/operators/CpuFill.cpp",
         "src/runtime/cpu/operators/CpuFloor.cpp",
+        "src/runtime/cpu/operators/CpuPermute.cpp",
+        "src/runtime/cpu/operators/CpuReshape.cpp",
         "utils/CommonGraphOptions.cpp",
         "utils/GraphUtils.cpp",
         "utils/Utils.cpp",
diff --git a/arm_compute/core/experimental/Types.h b/arm_compute/core/experimental/Types.h
index 4dee5ff..81b4dc8 100644
--- a/arm_compute/core/experimental/Types.h
+++ b/arm_compute/core/experimental/Types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -38,6 +38,7 @@
 enum TensorType : int32_t
 {
     ACL_UNKNOWN = -1,
+    ACL_SRC_DST = 0,
     ACL_SRC     = 0,
     ACL_SRC_0   = 0,
     ACL_SRC_1   = 1,
diff --git a/arm_compute/runtime/NEON/functions/NECopy.h b/arm_compute/runtime/NEON/functions/NECopy.h
index a58ac9e..d5f22d7 100644
--- a/arm_compute/runtime/NEON/functions/NECopy.h
+++ b/arm_compute/runtime/NEON/functions/NECopy.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,30 +24,33 @@
 #ifndef ARM_COMPUTE_NECOPY_H
 #define ARM_COMPUTE_NECOPY_H
 
+#include "arm_compute/runtime/IFunction.h"
+
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
+
+#include <memory>
 
 namespace arm_compute
 {
 class ITensor;
 class ITensorInfo;
 
-/** Basic function to run @ref NECopyKernel */
-class NECopy : public INESimpleFunctionNoBorder
+/** Basic function to run @ref CpuCopyKernel */
+class NECopy : public IFunction
 {
 public:
-    /** Constructor */
-    NECopy() = default;
+    /** Default Constructor */
+    NECopy();
+    /** Default Destructor */
+    ~NECopy();
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     NECopy(const NECopy &) = delete;
+    /** Default move constructor */
+    NECopy(NECopy &&);
     /** Prevent instances of this class from being copied (As this class contains pointers) */
     NECopy &operator=(const NECopy &) = delete;
-    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
-    NECopy(NECopy &&) = delete;
-    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
-    NECopy &operator=(NECopy &&) = delete;
-    /** Default destructor */
-    ~NECopy();
+    /** Default move assignment operator */
+    NECopy &operator=(NECopy &&);
     /** Initialise the function's source and destination.
      *
      * @param[in]  input  Source tensor. Data types supported: All
@@ -63,6 +66,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+    // Inherited methods overridden
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_NECOPY_H */
diff --git a/arm_compute/runtime/NEON/functions/NEFill.h b/arm_compute/runtime/NEON/functions/NEFill.h
index 14d690f..3162e26 100644
--- a/arm_compute/runtime/NEON/functions/NEFill.h
+++ b/arm_compute/runtime/NEON/functions/NEFill.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,24 +24,46 @@
 #ifndef ARM_COMPUTE_NEFILL_H
 #define ARM_COMPUTE_NEFILL_H
 
+#include "arm_compute/runtime/IFunction.h"
+
 #include "arm_compute/core/PixelValue.h"
 #include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
+
+#include <memory>
 
 namespace arm_compute
 {
 class ITensor;
 
-/** Basic function to run @ref NEMemsetKernel */
-class NEFill : public INESimpleFunctionNoBorder
+/** Basic function to run @ref CpuFillKernel */
+class NEFill : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEFill();
+    /** Default Destructor */
+    ~NEFill();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEFill(const NEFill &) = delete;
+    /** Default move constructor */
+    NEFill(NEFill &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEFill &operator=(const NEFill &) = delete;
+    /** Default move assignment operator */
+    NEFill &operator=(NEFill &&);
     /** Initialize the function
      *
      * @param[in,out] tensor         Source tensor. Data types supported: All
      * @param[in]     constant_value Constant value to use to fill tensor.
      */
     void configure(ITensor *tensor, PixelValue constant_value);
+
+    // Inherited methods overridden
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_FILL_H */
diff --git a/arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h b/arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h
index 5b5bb5c..7973a6e 100644
--- a/arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,12 +32,12 @@
 {
 class ITensor;
 class ITensorInfo;
-class NEMemsetKernel;
+class NEFill;
 class NEMaxUnpoolingLayerKernel;
 
 /** Function to perform MaxUnpooling. This function calls the following NEON kernels:
  *
- * -# @ref NEMemsetKernel
+ * -# @ref NEFill
  * -# @ref NEMaxUnpoolingLayerKernel
  */
 class NEMaxUnpoolingLayer : public IFunction
@@ -82,7 +82,7 @@
     void run() override;
 
 private:
-    std::unique_ptr<NEMemsetKernel>            _memset_kernel;
+    std::unique_ptr<NEFill>                    _fill_func;
     std::unique_ptr<NEMaxUnpoolingLayerKernel> _unpooling_layer_kernel;
 };
 }
diff --git a/arm_compute/runtime/NEON/functions/NEPadLayer.h b/arm_compute/runtime/NEON/functions/NEPadLayer.h
index 3fdbb0d..ede9758 100644
--- a/arm_compute/runtime/NEON/functions/NEPadLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPadLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,6 +26,7 @@
 
 #include "arm_compute/runtime/IFunction.h"
 #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
+#include "arm_compute/runtime/NEON/functions/NECopy.h"
 #include "arm_compute/runtime/NEON/functions/NEStridedSlice.h"
 #include "arm_compute/runtime/SubTensor.h"
 
@@ -35,7 +36,6 @@
 
 namespace arm_compute
 {
-class NECopyKernel;
 class NEPadLayerKernel;
 
 /** Basic function to pad a tensor. This function calls the following NEON functions/kernels:
@@ -43,7 +43,7 @@
  *  - For padding mode = PaddingMode::CONSTANT:
  *      -# @ref NEPadLayerKernel
  *  - Otherwise:
- *      -# @ref NECopyKernel
+ *      -# @ref NECopy
  *      -# @ref NEStridedSlice
  *      -# @ref NEConcatenateLayer
  *
@@ -109,7 +109,7 @@
     void configure_reflect_symmetric_mode(ITensor *input, ITensor *output);
 
 private:
-    std::unique_ptr<NECopyKernel>     _copy_kernel;
+    NECopy                            _copy_function;
     std::unique_ptr<NEPadLayerKernel> _pad_kernel;
     PaddingMode                       _mode;
     PaddingList                       _padding;
diff --git a/arm_compute/runtime/NEON/functions/NEPermute.h b/arm_compute/runtime/NEON/functions/NEPermute.h
index ef8854b..998a1d6 100644
--- a/arm_compute/runtime/NEON/functions/NEPermute.h
+++ b/arm_compute/runtime/NEON/functions/NEPermute.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,20 +24,34 @@
 #ifndef ARM_COMPUTE_NEPERMUTE_H
 #define ARM_COMPUTE_NEPERMUTE_H
 
-#include "arm_compute/runtime/NEON/INESimpleFunctionNoBorder.h"
+#include "arm_compute/runtime/IFunction.h"
 
 #include "arm_compute/core/Types.h"
 
+#include <memory>
+
 namespace arm_compute
 {
 // Forward declarations
 class ITensor;
 class ITensorInfo;
 
-/** Basic function to run @ref NEPermuteKernel */
-class NEPermute : public INESimpleFunctionNoBorder
+/** Basic function to run @ref CpuPermuteKernel */
+class NEPermute : public IFunction
 {
 public:
+    /** Default Constructor */
+    NEPermute();
+    /** Default Destructor */
+    ~NEPermute();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPermute(const NEPermute &) = delete;
+    /** Default move constructor */
+    NEPermute(NEPermute &&);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEPermute &operator=(const NEPermute &) = delete;
+    /** Default move assignment operator */
+    NEPermute &operator=(NEPermute &&);
     /** Configure the permute NEON kernel
      *
      * @note Arbitrary permutation vectors are supported with rank not greater than 4
@@ -58,6 +72,13 @@
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm);
+
+    // Inherited methods overridden
+    void run() override;
+
+private:
+    struct Impl;
+    std::unique_ptr<Impl> _impl;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NEPERMUTE_H */
diff --git a/arm_compute/runtime/NEON/functions/NERNNLayer.h b/arm_compute/runtime/NEON/functions/NERNNLayer.h
index c42b303..66f7f2e 100644
--- a/arm_compute/runtime/NEON/functions/NERNNLayer.h
+++ b/arm_compute/runtime/NEON/functions/NERNNLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,6 +27,7 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/NEON/functions/NEActivationLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEArithmeticAddition.h"
+#include "arm_compute/runtime/NEON/functions/NECopy.h"
 #include "arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h"
 #include "arm_compute/runtime/NEON/functions/NEGEMM.h"
 
@@ -34,7 +35,6 @@
 {
 // Forward declarations
 class ITensor;
-class NECopyKernel;
 
 /** Basic function to run @ref NERNNLayer */
 class NERNNLayer : public IFunction
@@ -83,16 +83,16 @@
     void prepare() override;
 
 private:
-    MemoryGroup                   _memory_group;
-    NEGEMM                        _gemm_state_f;
-    NEArithmeticAddition          _add_f;
-    NEActivationLayer             _activation;
-    NEFullyConnectedLayer         _fully_connected;
-    std::unique_ptr<NECopyKernel> _copy_kernel;
-    Tensor                        _fully_connected_out;
-    Tensor                        _gemm_output;
-    Tensor                        _add_output;
-    bool                          _is_prepared;
+    MemoryGroup           _memory_group;
+    NEGEMM                _gemm_state_f;
+    NEArithmeticAddition  _add_f;
+    NEActivationLayer     _activation;
+    NEFullyConnectedLayer _fully_connected;
+    NECopy                _copy_f;
+    Tensor                _fully_connected_out;
+    Tensor                _gemm_output;
+    Tensor                _add_output;
+    bool                  _is_prepared;
 };
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_NERNNLAYER_H */
diff --git a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
index 641a96e..b4c3af1 100644
--- a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -73,41 +73,5 @@
     struct Impl;
     std::unique_ptr<Impl> _impl;
 };
-
-namespace experimental
-{
-/** Basic function to run @ref NEReshapeLayerKernel */
-class NEReshape : public INEOperator
-{
-public:
-    /** Default Constructor */
-    NEReshape() = default;
-    /** Default Destructor */
-    ~NEReshape();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEReshape(const NEReshape &) = delete;
-    /** Default move constructor */
-    NEReshape(NEReshapeLayer &&);
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEReshape &operator=(const NEReshape &) = delete;
-    /** Default move assignment operator */
-    NEReshape &operator=(NEReshape &&);
-    /** Initialise the kernel's inputs and outputs
-     *
-     * @param[in]  input  Input tensor info. Data type supported: All
-     * @param[out] output Output info. Data type supported: Same as @p input
-     */
-    void configure(const ITensorInfo *input, ITensorInfo *output);
-
-    /** Static function to check if given info will lead to a valid configuration of @ref NEReshapeLayer
-     *
-     * @param[in] input  Input tensor info. Data type supported: All
-     * @param[in] output Output tensor info. Data type supported: Same as @p input
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-};
-} // namespace experimental
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_NERESHAPELAYER_H */
diff --git a/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h b/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h
index 62af092..3a6f8d7 100644
--- a/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESpaceToBatchLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -34,7 +34,7 @@
 class ITensor;
 class ITensorInfo;
 class NESpaceToBatchLayerKernel;
-class NEMemsetKernel;
+class NEFill;
 
 /** Basic function to spatial divide a tensor. This function calls the following NEON kernels/functions:
  *
@@ -102,7 +102,7 @@
 
 private:
     std::unique_ptr<NESpaceToBatchLayerKernel> _space_to_batch_kernel; /**< SpaceToBatch kernel to run */
-    std::unique_ptr<NEMemsetKernel>            _memset_kernel;         /**< Memset kernel to run */
+    std::unique_ptr<NEFill>                    _fill_f;                /**< Fill function to run */
     bool                                       _has_padding;           /**< Flag to check if the output has padding */
 };
 } // namespace arm_compute
diff --git a/docs/00_introduction.dox b/docs/00_introduction.dox
index b8bdabf..5e8769c 100644
--- a/docs/00_introduction.dox
+++ b/docs/00_introduction.dox
@@ -659,8 +659,8 @@
     - @ref CLGEMMDeconvolutionLayer
     - @ref NEDeconvolutionLayer
  - Added SYMMETRIC and REFLECT modes for @ref CLPadLayerKernel / @ref CLPadLayer.
- - Replaced the calls to @ref NECopyKernel and @ref NEMemsetKernel with @ref NEPadLayer in @ref NEGenerateProposalsLayer.
- - Replaced the calls to @ref CLCopyKernel and @ref CLMemsetKernel with @ref CLPadLayer in @ref CLGenerateProposalsLayer.
+ - Replaced the calls to NECopyKernel and NEMemsetKernel with @ref NEPadLayer in @ref NEGenerateProposalsLayer.
+ - Replaced the calls to CLCopyKernel and CLMemsetKernel with @ref CLPadLayer in @ref CLGenerateProposalsLayer.
  - Improved performance for CL Inception V3 - FP16.
  - Improved accuracy for CL Inception V3 - FP16 by enabling FP32 accumulator (mixed-precision).
  - Improved NEON performance by enabling fusing batch normalization with convolution and depth-wise convolution layer.
@@ -828,7 +828,7 @@
     - @ref NEStackLayerKernel / @ref NEStackLayer
     - @ref NERangeKernel / @ref NERange
     - @ref NEPadLayer
-    - @ref NEMemsetKernel
+    - NEMemsetKernel
     - @ref NEGatherKernel / @ref NEGather
     - @ref NEElementwiseComparison
     - @ref NEElementwiseComparisonStatic
@@ -1070,7 +1070,7 @@
  - New NEON kernels / functions
     - Added name() method to all kernels.
     - Added support for Winograd 5x5.
-    - @ref NEPermuteKernel / @ref NEPermute
+    - NEPermuteKernel / @ref NEPermute
     - @ref NEWinogradLayerTransformInputKernel / NEWinogradLayer
     - @ref NEWinogradLayerTransformOutputKernel / NEWinogradLayer
     - @ref NEWinogradLayerTransformWeightsKernel / NEWinogradLayer
@@ -1183,7 +1183,7 @@
     - @ref NEQuantizationLayerKernel @ref NEMinMaxLayerKernel / @ref NEQuantizationLayer
     - @ref NEROIPoolingLayerKernel / @ref NEROIPoolingLayer
     - @ref NEReductionOperationKernel / @ref NEReductionOperation
-    - @ref NEReshapeLayerKernel / @ref NEReshapeLayer
+    - NEReshapeLayerKernel / @ref NEReshapeLayer
 
  - New OpenCL kernels / functions:
     - @ref CLDepthwiseConvolutionLayer3x3NCHWKernel @ref CLDepthwiseConvolutionLayer3x3NHWCKernel CLDepthwiseIm2ColKernel CLDepthwiseVectorToTensorKernel CLDepthwiseWeightsReshapeKernel / CLDepthwiseConvolutionLayer3x3 @ref CLDepthwiseConvolutionLayer CLDepthwiseSeparableConvolutionLayer
diff --git a/src/core/NEON/NEKernels.h b/src/core/NEON/NEKernels.h
index 6c31a73..f20ecc1 100644
--- a/src/core/NEON/NEKernels.h
+++ b/src/core/NEON/NEKernels.h
@@ -45,7 +45,6 @@
 #include "src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h"
 #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
 #include "src/core/NEON/kernels/NEConvolutionKernel.h"
-#include "src/core/NEON/kernels/NECopyKernel.h"
 #include "src/core/NEON/kernels/NECropKernel.h"
 #include "src/core/NEON/kernels/NECumulativeDistributionKernel.h"
 #include "src/core/NEON/kernels/NEDepthConvertLayerKernel.h"
@@ -98,14 +97,12 @@
 #include "src/core/NEON/kernels/NEMeanStdDevKernel.h"
 #include "src/core/NEON/kernels/NEMeanStdDevNormalizationKernel.h"
 #include "src/core/NEON/kernels/NEMedian3x3Kernel.h"
-#include "src/core/NEON/kernels/NEMemsetKernel.h"
 #include "src/core/NEON/kernels/NEMinMaxLayerKernel.h"
 #include "src/core/NEON/kernels/NEMinMaxLocationKernel.h"
 #include "src/core/NEON/kernels/NENonLinearFilterKernel.h"
 #include "src/core/NEON/kernels/NENonMaximaSuppression3x3Kernel.h"
 #include "src/core/NEON/kernels/NENormalizationLayerKernel.h"
 #include "src/core/NEON/kernels/NEPadLayerKernel.h"
-#include "src/core/NEON/kernels/NEPermuteKernel.h"
 #include "src/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h"
 #include "src/core/NEON/kernels/NEPoolingLayerKernel.h"
 #include "src/core/NEON/kernels/NEPriorBoxLayerKernel.h"
@@ -117,7 +114,6 @@
 #include "src/core/NEON/kernels/NEReductionOperationKernel.h"
 #include "src/core/NEON/kernels/NERemapKernel.h"
 #include "src/core/NEON/kernels/NEReorgLayerKernel.h"
-#include "src/core/NEON/kernels/NEReshapeLayerKernel.h"
 #include "src/core/NEON/kernels/NEReverseKernel.h"
 #include "src/core/NEON/kernels/NEScaleKernel.h"
 #include "src/core/NEON/kernels/NEScharr3x3Kernel.h"
diff --git a/src/core/NEON/kernels/NECopyKernel.cpp b/src/core/NEON/kernels/NECopyKernel.cpp
deleted file mode 100644
index 337c44c..0000000
--- a/src/core/NEON/kernels/NECopyKernel.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NECopyKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/Window.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList())
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 4);
-
-    // Validate output if initialized
-    if(output->total_size() != 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_padded_shape(input->tensor_shape(), padding), output->tensor_shape());
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-    }
-
-    return Status{};
-}
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
-    // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output, *input);
-    return std::make_pair(Status{}, calculate_max_window(*output));
-}
-
-std::pair<Status, Window> validate_and_configure_window_with_padding(ITensorInfo *input, ITensorInfo *output, const PaddingList &padding)
-{
-    const TensorShape input_shape  = input->tensor_shape();
-    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(input_shape, padding);
-    auto_init_if_empty(*output, input->clone()->set_tensor_shape(padded_shape));
-    // Configure window
-    const Window win = calculate_max_window(*output, output->dimension(0));
-    return std::make_pair(Status{}, win);
-}
-
-} // namespace
-
-NECopyKernel::NECopyKernel()
-    : _input(nullptr), _output(nullptr), _padding()
-{
-}
-
-void NECopyKernel::configure(const ITensor *input, ITensor *output, const PaddingList &padding)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), padding));
-
-    _input   = input;
-    _output  = output;
-    _padding = padding;
-
-    std::pair<Status, Window> win_config;
-
-    if(padding.empty())
-    {
-        win_config = validate_and_configure_window(input->info(), output->info());
-    }
-    else
-    {
-        win_config = validate_and_configure_window_with_padding(input->info(), output->info(), padding);
-    }
-
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    INEKernel::configure(win_config.second);
-}
-
-Status NECopyKernel::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output, const PaddingList &padding)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, padding));
-
-    if(padding.empty())
-    {
-        ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
-    }
-    else
-    {
-        ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_with_padding(input->clone().get(), output->clone().get(), padding).first);
-    }
-
-    return Status{};
-}
-
-void NECopyKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
-    if(_padding.empty())
-    {
-        Window output_window{ window };
-        output_window.set(Window::DimX, Window::Dimension(output_window.x().start(), output_window.x().end(), _input->info()->dimension(0)));
-        Window out_slice = output_window.first_slice_window_1D();
-        do
-        {
-            Iterator input_it(_input, out_slice);
-            Iterator output_it(_output, out_slice);
-
-            execute_window_loop(out_slice, [&](const Coordinates &)
-            {
-                memcpy(output_it.ptr(), input_it.ptr(), _output->info()->dimension(0) * _output->info()->element_size());
-            },
-            input_it, output_it);
-        }
-        while(output_window.slide_window_slice_1D(out_slice));
-    }
-    else
-    {
-        Window input_window{ window };
-        input_window.set(Window::DimX, Window::Dimension(0, window.x().end() - _padding[0].first, _input->info()->dimension(0)));
-
-        Iterator     input_it(_input, input_window);
-        Iterator     output_it(_output, window);
-        const size_t row_size_in_bytes = _input->info()->dimension(0) * _input->info()->element_size();
-        execute_window_loop(window, [&](const Coordinates &)
-        {
-            auto dst_ptr = output_it.ptr() + _padding[0].first * _output->info()->element_size();
-            std::memcpy(dst_ptr, input_it.ptr(), row_size_in_bytes);
-        },
-        input_it, output_it);
-    }
-}
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NECopyKernel.h b/src/core/NEON/kernels/NECopyKernel.h
deleted file mode 100644
index 62b7b80..0000000
--- a/src/core/NEON/kernels/NECopyKernel.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NECOPYKERNEL_H
-#define ARM_COMPUTE_NECOPYKERNEL_H
-
-#include "arm_compute/core/Types.h"
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-class ITensor;
-
-/** NEON kernel to perform a copy between two tensors */
-class NECopyKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NECopyKernel";
-    }
-    /** Default constructor */
-    NECopyKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NECopyKernel(const NECopyKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers). */
-    NECopyKernel &operator=(const NECopyKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NECopyKernel(NECopyKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NECopyKernel &operator=(NECopyKernel &&) = default;
-    /** Default destructor */
-    ~NECopyKernel() = default;
-    /** Initialize the kernel's input, output.
-     *
-     * @param[in]  input   Source tensor. Data types supported: All
-     * @param[out] output  Destination tensor. Data types supported: same as @p input.
-     * @param[in]  padding (Optional) Padding to be applied to the input tensor
-     */
-    void configure(const ITensor *input, ITensor *output, const PaddingList &padding = PaddingList());
-    /** Static function to check if given info will lead to a valid configuration of @ref NECopyKernel
-     *
-     * @param[in] input   Source tensor. Data types supported: All
-     * @param[in] output  Destination tensor. Data types supported: same as @p input.
-     * @param[in] padding (Optional) Padding to be applied to the input tensor
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &padding = PaddingList());
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    const ITensor *_input;
-    ITensor       *_output;
-    PaddingList    _padding;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NECOPYKERNEL_H */
diff --git a/src/core/NEON/kernels/NEMemsetKernel.h b/src/core/NEON/kernels/NEMemsetKernel.h
deleted file mode 100644
index a720e60..0000000
--- a/src/core/NEON/kernels/NEMemsetKernel.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEMEMSETKERNEL_H
-#define ARM_COMPUTE_NEMEMSETKERNEL_H
-
-#include "arm_compute/core/PixelValue.h"
-#include "arm_compute/core/Types.h"
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** Interface for filling the planes of a tensor */
-class NEMemsetKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEMemsetKernel";
-    }
-    /** Default constructor */
-    NEMemsetKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEMemsetKernel(const NEMemsetKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEMemsetKernel &operator=(const NEMemsetKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEMemsetKernel(NEMemsetKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEMemsetKernel &operator=(NEMemsetKernel &&) = default;
-    /** Default destructor */
-    ~NEMemsetKernel() = default;
-    /** Initialise the kernel's tensor and filling value
-     *
-     * @param[in,out] tensor         Input tensor to fill. Supported data types: All
-     * @param[in]     constant_value The value used to fill the planes of the tensor
-     */
-    void configure(ITensor *tensor, const PixelValue &constant_value);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    ITensor   *_tensor;
-    PixelValue _constant_value;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEMEMSETKERNEL_H */
diff --git a/src/core/NEON/kernels/NEPermuteKernel.cpp b/src/core/NEON/kernels/NEPermuteKernel.cpp
deleted file mode 100644
index 6a9f5d3..0000000
--- a/src/core/NEON/kernels/NEPermuteKernel.cpp
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEPermuteKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Validate.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-namespace
-{
-#include "src/core/NEON/kernels/convolution/common/shims.hpp"
-} // namespace
-
-namespace arm_compute
-{
-namespace
-{
-inline bool is_permutation_supported(const PermutationVector &v)
-{
-    static const std::array<PermutationVector, 2> permutations2 =
-    {
-        {
-            PermutationVector(0U, 1U),
-            PermutationVector(1U, 0U),
-        }
-    };
-    static const std::array<PermutationVector, 6> permutations3 =
-    {
-        {
-            PermutationVector(2U, 0U, 1U),
-            PermutationVector(1U, 2U, 0U),
-            PermutationVector(0U, 1U, 2U),
-            PermutationVector(0U, 2U, 1U),
-            PermutationVector(1U, 0U, 2U),
-            PermutationVector(2U, 1U, 0U),
-        }
-    };
-    static const std::array<PermutationVector, 24> permutations4 =
-    {
-        {
-            PermutationVector(0U, 1U, 2U, 3U),
-            PermutationVector(1U, 0U, 2U, 3U),
-            PermutationVector(2U, 0U, 1U, 3U),
-            PermutationVector(0U, 2U, 1U, 3U),
-            PermutationVector(1U, 2U, 0U, 3U),
-            PermutationVector(2U, 1U, 0U, 3U),
-            PermutationVector(2U, 1U, 3U, 0U),
-            PermutationVector(1U, 2U, 3U, 0U),
-            PermutationVector(3U, 2U, 1U, 0U),
-            PermutationVector(2U, 3U, 1U, 0U),
-            PermutationVector(1U, 3U, 2U, 0U),
-            PermutationVector(3U, 1U, 2U, 0U),
-            PermutationVector(3U, 0U, 2U, 1U),
-            PermutationVector(0U, 3U, 2U, 1U),
-            PermutationVector(2U, 3U, 0U, 1U),
-            PermutationVector(3U, 2U, 0U, 1U),
-            PermutationVector(0U, 2U, 3U, 1U),
-            PermutationVector(2U, 0U, 3U, 1U),
-            PermutationVector(1U, 0U, 3U, 2U),
-            PermutationVector(0U, 1U, 3U, 2U),
-            PermutationVector(3U, 1U, 0U, 2U),
-            PermutationVector(1U, 3U, 0U, 2U),
-            PermutationVector(0U, 3U, 1U, 2U),
-            PermutationVector(3U, 0U, 1U, 2U)
-        }
-    };
-
-    return (permutations2.end() != std::find(permutations2.begin(), permutations2.end(), v)) || (permutations3.end() != std::find(permutations3.begin(), permutations3.end(), v))
-           || (permutations4.end() != std::find(permutations4.begin(), permutations4.end(), v));
-}
-
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_permutation_supported(perm), "PermutationVector not supported.");
-
-    const TensorShape output_shape = misc::shape_calculator::compute_permutation_output_shape(*input, perm);
-
-    // Validate configured output
-    if(output->total_size() != 0)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-    }
-
-    return Status{};
-}
-} // namespace
-
-template <typename T>
-void NEPermuteKernel::run_permute(const Window &window)
-{
-    const DataLayout input_layout = _input->info()->data_layout();
-
-    // Input window
-    Window window_in = window;
-
-    // we only support these two configs in src/core/NEON/kernels/convolution/common/shims.hpp, for all others
-    // we have to fall back to C++
-    if((input_layout == DataLayout::NCHW && _perm == PermutationVector{ 2U, 0U, 1U }) || (input_layout == DataLayout::NHWC && _perm == PermutationVector{ 1U, 2U, 0U }))
-    {
-        window_in.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().end() - window.x().start()));
-        window_in.set(Window::DimY, Window::Dimension(window.y().start(), window.y().end(), window.y().end() - window.y().start()));
-        window_in.set(Window::DimZ, Window::Dimension(window.z().start(), window.z().end(), window.z().end() - window.z().start()));
-        window_in.set(3, Window::Dimension(window[3].start(), window[3].end(), window[3].end() - window[3].start()));
-    }
-
-    // Output window
-    Window                  window_out(window);
-    const Window::Dimension zero_window = Window::Dimension(0, 0, 0);
-    for(size_t d = 0; d <= _output->info()->num_dimensions(); ++d)
-    {
-        window_out.set(d, zero_window);
-    }
-
-    // Create iterators
-    Iterator in(_input, window_in);
-    Iterator out(_output, window_out);
-
-    int in_row_stride     = 0;
-    int in_col_stride     = 0;
-    int in_channel_stride = 0;
-    int in_batch_stride   = 0;
-    int n_cols            = 0;
-    int n_rows            = 0;
-    int n_channels        = 0;
-    int n_batches         = 0;
-
-    switch(input_layout)
-    {
-        case DataLayout::NCHW:
-        {
-            in_row_stride     = _input->info()->strides_in_bytes().y() / sizeof(T);
-            in_channel_stride = _input->info()->strides_in_bytes().z() / sizeof(T);
-            in_batch_stride   = _input->info()->strides_in_bytes()[3] / sizeof(T);
-            n_cols            = _input->info()->tensor_shape().x();
-            n_rows            = window_in.y().step();
-            n_channels        = _input->info()->tensor_shape().z();
-            n_batches         = _input->info()->tensor_shape()[3];
-            break;
-        }
-        case DataLayout::NHWC:
-        {
-            in_col_stride   = _input->info()->strides_in_bytes().y() / sizeof(T);
-            in_row_stride   = _input->info()->strides_in_bytes().z() / sizeof(T);
-            in_batch_stride = _input->info()->strides_in_bytes()[3] / sizeof(T);
-            n_channels      = _input->info()->tensor_shape().x();
-            n_cols          = window_in.y().step();
-            n_rows          = _input->info()->tensor_shape().z();
-            n_batches       = _input->info()->tensor_shape()[3];
-            break;
-        }
-        default:
-        {
-            ARM_COMPUTE_ERROR("Invalid input data layout.");
-            break;
-        }
-    }
-
-    // CHW -> HWC
-    if(input_layout == DataLayout::NCHW && _perm == PermutationVector{ 2U, 0U, 1U })
-    {
-        const int out_channel_stride = _output->info()->strides_in_bytes().x() / sizeof(T);
-        const int out_col_stride     = _output->info()->strides_in_bytes().y() / sizeof(T);
-        const int out_row_stride     = _output->info()->strides_in_bytes().z() / sizeof(T);
-        const int out_batch_stride   = _output->info()->strides_in_bytes()[3] / sizeof(T);
-        execute_window_loop(window_in, [&](const Coordinates & id)
-        {
-            const int idx = id[0] * out_col_stride + id[1] * out_row_stride + id[2] * out_channel_stride;
-            reorder::nchw_to_nhwc(reinterpret_cast<const T *>(in.ptr()), reinterpret_cast<T *>(out.ptr()) + idx,
-                                  n_batches, n_channels, n_rows, n_cols,
-                                  in_batch_stride, in_channel_stride, in_row_stride,
-                                  out_batch_stride, out_row_stride, out_col_stride);
-        },
-        in, out);
-    }
-    // HWC -> CHW
-    else if(input_layout == DataLayout::NHWC && _perm == PermutationVector{ 1U, 2U, 0U })
-    {
-        const int out_col_stride     = _output->info()->strides_in_bytes().x() / sizeof(T);
-        const int out_row_stride     = _output->info()->strides_in_bytes().y() / sizeof(T);
-        const int out_channel_stride = _output->info()->strides_in_bytes().z() / sizeof(T);
-        const int out_batch_stride   = _output->info()->strides_in_bytes()[3] / sizeof(T);
-        execute_window_loop(window_in, [&](const Coordinates & id)
-        {
-            const int idx = id[0] * out_channel_stride + id[1] * out_col_stride + id[2] * out_row_stride;
-            reorder::nhwc_to_nchw(reinterpret_cast<const T *>(in.ptr()), reinterpret_cast<T *>(out.ptr()) + idx,
-                                  n_batches, n_rows, n_cols, n_channels,
-                                  in_batch_stride, in_row_stride, in_col_stride,
-                                  out_batch_stride, out_channel_stride, out_row_stride);
-        },
-        in, out);
-    }
-    else
-    {
-        // All other cases fall back to C++
-        // Permute strides
-        Strides strides      = _output->info()->strides_in_bytes();
-        Strides perm_strides = strides;
-        permute_strides(perm_strides, _perm);
-        const int perm_stride_3 = _input->info()->num_dimensions() >= 4 ? perm_strides[3] : 0;
-        execute_window_loop(window, [&](const Coordinates & id)
-        {
-            const int idx                             = id[0] * perm_strides[0] + id[1] * perm_strides[1] + id[2] * perm_strides[2] + id[3] * perm_stride_3;
-            *(reinterpret_cast<T *>(out.ptr() + idx)) = *(reinterpret_cast<const T *>(in.ptr()));
-        },
-        in, out);
-    }
-}
-
-NEPermuteKernel::NEPermuteKernel()
-    : _func(), _input(nullptr), _output(nullptr), _perm()
-{
-}
-
-void NEPermuteKernel::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    const TensorShape output_shape = misc::shape_calculator::compute_permutation_output_shape(*input->info(), perm);
-    // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->clone()->set_tensor_shape(output_shape));
-
-    // Perform validation step
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), perm));
-
-    _input  = input;
-    _output = output;
-    _perm   = perm;
-
-    switch(input->info()->element_size())
-    {
-        case 1:
-            _func = &NEPermuteKernel::run_permute<uint8_t>;
-            break;
-        case 2:
-            _func = &NEPermuteKernel::run_permute<uint16_t>;
-            break;
-        case 4:
-            _func = &NEPermuteKernel::run_permute<uint32_t>;
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Element size not supported");
-            break;
-    }
-
-    // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps());
-
-    // The NEPermute doesn't need padding so update_window_and_padding() can be skipped
-    Coordinates coord;
-    coord.set_num_dimensions(output->info()->num_dimensions());
-    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
-
-    ICPPKernel::configure(win);
-}
-
-Status NEPermuteKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, perm));
-    return Status{};
-}
-
-void NEPermuteKernel::run(const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
-
-    if(_func != nullptr)
-    {
-        (this->*_func)(window);
-    }
-}
-} // namespace arm_compute
diff --git a/src/core/NEON/kernels/NEPermuteKernel.h b/src/core/NEON/kernels/NEPermuteKernel.h
deleted file mode 100644
index 80187de..0000000
--- a/src/core/NEON/kernels/NEPermuteKernel.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2018-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NEPERMUTEKERNEL_H
-#define ARM_COMPUTE_NEPERMUTEKERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** NEON kernel to perform tensor permutation.
- *
- * Permutes given a permutation vector
- */
-class NEPermuteKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEPermuteKernel";
-    }
-    /** Default constructor */
-    NEPermuteKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEPermuteKernel(const NEPermuteKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEPermuteKernel &operator=(const NEPermuteKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEPermuteKernel(NEPermuteKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEPermuteKernel &operator=(NEPermuteKernel &&) = default;
-    /** Default destructor */
-    ~NEPermuteKernel() = default;
-
-    /** Set the input and output of the kernel.
-     *
-     * @note Arbitrary permutation vectors are supported with rank not greater than 4
-     *
-     * @param[in]  input  The input tensor to permute. Data types supported: All
-     * @param[out] output The output tensor. Data types supported: Same as @p input
-     * @param[in]  perm   Permutation vector
-     */
-    void configure(const ITensor *input, ITensor *output, const PermutationVector &perm);
-    /** Static function to check if given info will lead to a valid configuration of @ref CPPPermuteKernel
-     *
-     * @note Arbitrary permutation vectors are supported with rank not greater than 4
-     *
-     * @param[in] input  The input tensor to permute. Data types supported: All
-     * @param[in] output The output tensor. Data types supported: Same as @p input
-     * @param[in] perm   Permutation vector
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm);
-
-    // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
-
-private:
-    /** Template function to run the permute
-     *
-     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
-     */
-    template <typename T>
-    void run_permute(const Window &window);
-
-    /** Common signature for all the specialised permute functions
-     *
-     * @param[in] window Region on which to execute the kernel.
-     */
-    using PermuteFunctionPtr = void (NEPermuteKernel::*)(const Window &window);
-
-    PermuteFunctionPtr _func;
-    const ITensor     *_input;
-    ITensor           *_output;
-    PermutationVector  _perm;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NEPERMUTEKERNEL_H */
diff --git a/src/core/NEON/kernels/NEReshapeLayerKernel.cpp b/src/core/NEON/kernels/NEReshapeLayerKernel.cpp
deleted file mode 100644
index 462404f..0000000
--- a/src/core/NEON/kernels/NEReshapeLayerKernel.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "src/core/NEON/kernels/NEReshapeLayerKernel.h"
-
-#include "arm_compute/core/Error.h"
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/IAccessWindow.h"
-#include "arm_compute/core/ITensor.h"
-#include "arm_compute/core/TensorInfo.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/core/Validate.h"
-#include "src/core/AccessWindowStatic.h"
-#include "src/core/CPP/Validate.h"
-#include "src/core/NEON/INEKernel.h"
-#include "src/core/helpers/AutoConfiguration.h"
-#include "src/core/helpers/WindowHelpers.h"
-
-#include <cstdint>
-
-/** [NEReshapeLayerKernel Kernel] **/
-namespace arm_compute
-{
-namespace
-{
-Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input) is not needed here as this kernel doesn't use NEON FP16 instructions.
-    ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() == DataType::UNKNOWN);
-
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().total_size() != output->tensor_shape().total_size());
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(input, output);
-
-    return Status{};
-}
-
-template <typename T>
-inline void reshape_tensor(const Window &window, const ITensor *input, ITensor *output)
-{
-    const TensorShape &input_shape  = input->info()->tensor_shape();
-    const TensorShape &output_shape = output->info()->tensor_shape();
-    Coordinates        output_coord{};
-
-    Iterator in(input, window);
-
-    execute_window_loop(window, [&](const Coordinates & id)
-    {
-        output_coord                                                 = index2coords(output_shape, coords2index(input_shape, id));
-        *reinterpret_cast<T *>(output->ptr_to_element(output_coord)) = *reinterpret_cast<T *>(in.ptr());
-    },
-    in);
-}
-} // namespace
-
-void NEReshapeLayerKernel::configure(const ITensorInfo *input, ITensorInfo *output)
-{
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input, output));
-
-    // Configure kernel window
-    Window win = calculate_max_window(*input);
-
-    // Set the output valid region
-    output->set_valid_region(ValidRegion(Coordinates(), output->tensor_shape()));
-
-    INEKernel::configure(win);
-}
-
-void NEReshapeLayerKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
-{
-    ARM_COMPUTE_UNUSED(info);
-    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
-
-    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
-    auto       dst = tensors.get_tensor(TensorType::ACL_DST);
-
-    switch(src->info()->data_type())
-    {
-        case DataType::U8:
-        case DataType::S8:
-        case DataType::QASYMM8:
-        case DataType::QASYMM8_SIGNED:
-            reshape_tensor<uint8_t>(window, src, dst);
-            break;
-        case DataType::U16:
-        case DataType::S16:
-        case DataType::F16:
-            reshape_tensor<uint16_t>(window, src, dst);
-            break;
-        case DataType::U32:
-        case DataType::S32:
-        case DataType::F32:
-            reshape_tensor<uint32_t>(window, src, dst);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Unsupported data type!");
-    }
-}
-
-Status NEReshapeLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output));
-
-    return Status{};
-}
-} // namespace arm_compute
-/** [NEReshapeLayerKernel Kernel] **/
diff --git a/src/core/NEON/kernels/NEReshapeLayerKernel.h b/src/core/NEON/kernels/NEReshapeLayerKernel.h
deleted file mode 100644
index ecec8d9..0000000
--- a/src/core/NEON/kernels/NEReshapeLayerKernel.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_NERESHAPELAYERKERNEL_H
-#define ARM_COMPUTE_NERESHAPELAYERKERNEL_H
-
-#include "src/core/NEON/INEKernel.h"
-#include "src/core/NEON/INESimpleKernel.h"
-
-namespace arm_compute
-{
-// Forward declarations
-class ITensor;
-
-/** Interface for the kernel to perform tensor reshaping */
-class NEReshapeLayerKernel : public INEKernel
-{
-public:
-    const char *name() const override
-    {
-        return "NEReshapeLayerKernel";
-    }
-    /** Default constructor */
-    NEReshapeLayerKernel() = default;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEReshapeLayerKernel(const NEReshapeLayerKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    NEReshapeLayerKernel &operator=(const NEReshapeLayerKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    NEReshapeLayerKernel(NEReshapeLayerKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    NEReshapeLayerKernel &operator=(NEReshapeLayerKernel &&) = default;
-    /** Default destructor */
-    ~NEReshapeLayerKernel() = default;
-    /** Set the input and output info of the kernel
-     *
-     * @param[in]  input  Source tensor info. Data type supported: All
-     * @param[out] output Destination tensor info. Data type supported: Same as @p input
-     */
-    void configure(const ITensorInfo *input, ITensorInfo *output);
-
-    /** Static function to check if given info will lead to a valid configuration of @ref NEReshapeLayerKernel
-     *
-     * @param[in] input  Source tensor info. Data type supported: All
-     * @param[in] output Destination tensor info. Data type supported: Same as @p input
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
-};
-} // namespace arm_compute
-#endif /*ARM_COMPUTE_NERESHAPELAYERKERNEL_H */
diff --git a/src/core/cpu/kernels/CpuActivationKernel.h b/src/core/cpu/kernels/CpuActivationKernel.h
index 083915b..e49171b 100644
--- a/src/core/cpu/kernels/CpuActivationKernel.h
+++ b/src/core/cpu/kernels/CpuActivationKernel.h
@@ -39,7 +39,7 @@
 public:
     CpuActivationKernel() = default;
     ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuActivationKernel);
-    /** Set the input and output tensor.
+    /** Configure kernel for a given list of arguments
      *
      * @note If the output tensor is a nullptr, the activation function will be performed in-place
      *
diff --git a/src/core/cpu/kernels/CpuCopyKernel.cpp b/src/core/cpu/kernels/CpuCopyKernel.cpp
new file mode 100644
index 0000000..8ec354b
--- /dev/null
+++ b/src/core/cpu/kernels/CpuCopyKernel.cpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuCopyKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PaddingList &padding = PaddingList())
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+    ARM_COMPUTE_RETURN_ERROR_ON(padding.size() > 4);
+
+    // Validate destination if initialized
+    if(dst->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(misc::shape_calculator::compute_padded_shape(src->tensor_shape(), padding), dst->tensor_shape());
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+    }
+
+    return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(const ITensorInfo *src, ITensorInfo *dst)
+{
+    // Destination auto initialization if not yet initialized
+    auto_init_if_empty(*dst, *src);
+    return std::make_pair(Status{}, calculate_max_window(*dst));
+}
+
+std::pair<Status, Window> validate_and_configure_window_with_padding(const ITensorInfo *src, ITensorInfo *dst, const PaddingList &padding)
+{
+    const TensorShape src_shape    = src->tensor_shape();
+    const TensorShape padded_shape = misc::shape_calculator::compute_padded_shape(src_shape, padding);
+    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(padded_shape));
+    // Configure window
+    const Window win = calculate_max_window(*dst, dst->dimension(0));
+    return std::make_pair(Status{}, win);
+}
+
+} // namespace
+
+void CpuCopyKernel::configure(const ITensorInfo *src, ITensorInfo *dst, const PaddingList &padding)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, padding));
+
+    _padding = padding;
+
+    std::pair<Status, Window> win_config;
+    if(padding.empty())
+    {
+        win_config = validate_and_configure_window(src, dst);
+    }
+    else
+    {
+        win_config = validate_and_configure_window_with_padding(src, dst, padding);
+    }
+
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+    ICpuKernel::configure(win_config.second);
+}
+
+Status CpuCopyKernel::validate(const arm_compute::ITensorInfo *src, const arm_compute::ITensorInfo *dst, const PaddingList &padding)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, padding));
+
+    if(padding.empty())
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get()).first);
+    }
+    else
+    {
+        ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window_with_padding(src->clone().get(), dst->clone().get(), padding).first);
+    }
+
+    return Status{};
+}
+
+void CpuCopyKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+
+    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    auto       dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    if(_padding.empty())
+    {
+        Window dst_window{ window };
+        dst_window.set(Window::DimX, Window::Dimension(dst_window.x().start(), dst_window.x().end(), src->info()->dimension(0)));
+        Window out_slice = dst_window.first_slice_window_1D();
+        do
+        {
+            Iterator src_it(src, out_slice);
+            Iterator dst_it(dst, out_slice);
+
+            execute_window_loop(out_slice, [&](const Coordinates &)
+            {
+                memcpy(dst_it.ptr(), src_it.ptr(), dst->info()->dimension(0) * dst->info()->element_size());
+            },
+            src_it, dst_it);
+        }
+        while(dst_window.slide_window_slice_1D(out_slice));
+    }
+    else
+    {
+        Window src_window{ window };
+        src_window.set(Window::DimX, Window::Dimension(0, window.x().end() - _padding[0].first, src->info()->dimension(0)));
+
+        Iterator     src_it(src, src_window);
+        Iterator     dst_it(dst, window);
+        const size_t row_size_in_bytes = src->info()->dimension(0) * src->info()->element_size();
+        execute_window_loop(window, [&](const Coordinates &)
+        {
+            auto dst_ptr = dst_it.ptr() + _padding[0].first * dst->info()->element_size();
+            std::memcpy(dst_ptr, src_it.ptr(), row_size_in_bytes);
+        },
+        src_it, dst_it);
+    }
+}
+
+const char *CpuCopyKernel::name() const
+{
+    return "CpuCopyKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuCopyKernel.h b/src/core/cpu/kernels/CpuCopyKernel.h
new file mode 100644
index 0000000..7e33bf4
--- /dev/null
+++ b/src/core/cpu/kernels/CpuCopyKernel.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_COPY_KERNEL_H
+#define ARM_COMPUTE_CPU_COPY_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel to perform a copy between two tensors */
+class CpuCopyKernel : public ICpuKernel
+{
+public:
+    CpuCopyKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuCopyKernel);
+    /** Configure kernel for a given list of arguments
+     *
+     * @param[in]  src     Source tensor. Data types supported: All
+     * @param[out] dst     Destination tensor. Data types supported: same as @p src.
+     * @param[in]  padding (Optional) Padding to be applied to the input tensor
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, const PaddingList &padding = PaddingList());
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuCopyKernel
+     *
+     * @param[in] src     Source tensor. Data types supported: All
+     * @param[in] dst     Destination tensor. Data types supported: same as @p src.
+     * @param[in] padding (Optional) Padding to be applied to the input tensor
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PaddingList &padding = PaddingList());
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    PaddingList _padding{};
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_COPY_KERNEL_H */
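The kernels introduced by this patch are state-less: configure() consumes only ITensorInfo objects, and the tensors themselves are bound at execution time through an ITensorPack. A minimal in-tree sketch of driving the copy kernel directly, assuming the usual Tensor/TensorInfo setup (illustrative only; the ITensorPack/ThreadInfo include paths are the ones expected at this revision):

    #include "arm_compute/core/CPP/CPPTypes.h"   // ThreadInfo
    #include "arm_compute/core/ITensorPack.h"    // ITensorPack
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/core/cpu/kernels/CpuCopyKernel.h"

    using namespace arm_compute;

    void copy_kernel_sketch()
    {
        Tensor src{}, dst{};
        src.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::F32));
        src.allocator()->allocate();
        dst.allocator()->allocate();

        // Configuration only needs the tensor metadata
        cpu::kernels::CpuCopyKernel copy;
        copy.configure(src.info(), dst.info());

        // The actual tensors are supplied when the kernel is run
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, &src);
        pack.add_tensor(TensorType::ACL_DST, &dst);
        copy.run_op(pack, copy.window(), ThreadInfo{});
    }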
diff --git a/src/core/NEON/kernels/NEMemsetKernel.cpp b/src/core/cpu/kernels/CpuFillKernel.cpp
similarity index 73%
rename from src/core/NEON/kernels/NEMemsetKernel.cpp
rename to src/core/cpu/kernels/CpuFillKernel.cpp
index a8dfda3..d2280db 100644
--- a/src/core/NEON/kernels/NEMemsetKernel.cpp
+++ b/src/core/cpu/kernels/CpuFillKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "src/core/NEON/kernels/NEMemsetKernel.h"
+#include "src/core/cpu/kernels/CpuFillKernel.h"
 
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
@@ -34,41 +34,41 @@
 
 namespace arm_compute
 {
-NEMemsetKernel::NEMemsetKernel()
-    : _tensor(nullptr), _constant_value()
+namespace cpu
 {
-}
-
-void NEMemsetKernel::configure(ITensor *tensor, const PixelValue &constant_value)
+namespace kernels
+{
+void CpuFillKernel::configure(const ITensorInfo *tensor, const PixelValue &constant_value)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
-    _tensor         = tensor;
     _constant_value = constant_value;
 
     // Configure kernel window
-    Window win = calculate_max_window(*tensor->info(), Steps());
-    INEKernel::configure(win);
+    Window win = calculate_max_window(*tensor, Steps());
+    ICpuKernel::configure(win);
 }
 
-void NEMemsetKernel::run(const Window &window, const ThreadInfo &info)
+void CpuFillKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+
+    auto inout = tensors.get_tensor(TensorType::ACL_SRC_DST);
 
     // Collapse all the batches on the third dimension
     bool   has_collapsed = true;
     Window collapsed     = window.collapse_if_possible(window, Window::DimZ, &has_collapsed);
     ARM_COMPUTE_ERROR_ON(!has_collapsed);
 
-    uint8_t *const start_valid_region = _tensor->ptr_to_element(_tensor->info()->valid_region().anchor);
+    uint8_t *const start_valid_region = inout->ptr_to_element(inout->info()->valid_region().anchor);
     const auto     window_width       = static_cast<int>(collapsed.x().end()) - static_cast<int>(collapsed.x().start());
-    const size_t   element_size       = _tensor->info()->element_size();
+    const size_t   element_size       = inout->info()->element_size();
 
     // Unroll X dimension
     collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
 
-    Iterator tensor_it(_tensor, collapsed);
+    Iterator tensor_it(inout, collapsed);
     execute_window_loop(collapsed, [&](const Coordinates &)
     {
         uint8_t *base_addr = start_valid_region + tensor_it.offset();
@@ -81,4 +81,11 @@
     },
     tensor_it);
 }
+
+const char *CpuFillKernel::name() const
+{
+    return "CpuFillKernel";
+}
+} // namespace kernels
+} // namespace cpu
 } // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuFillKernel.h b/src/core/cpu/kernels/CpuFillKernel.h
new file mode 100644
index 0000000..9afdee4
--- /dev/null
+++ b/src/core/cpu/kernels/CpuFillKernel.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_FILL_KERNEL_H
+#define ARM_COMPUTE_CPU_FILL_KERNEL_H
+
+#include "arm_compute/core/PixelValue.h"
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel for filling a tensor with a given constant value */
+class CpuFillKernel : public ICpuKernel
+{
+public:
+    CpuFillKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuFillKernel);
+    /** Configure kernel for a given list of arguments
+     *
+     * @param[in,out] tensor         Tensor to fill. Supported data types: All
+     * @param[in]     constant_value The value used to fill the planes of the tensor
+     */
+    void configure(const ITensorInfo *tensor, const PixelValue &constant_value);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    PixelValue _constant_value{};
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_FILL_KERNEL_H */
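CpuFillKernel works in place on a single tensor, bound as ACL_SRC_DST when the operator is run (see the NEFill changes later in this patch). At the function level the usage is unchanged; a short sketch with an illustrative tensor shape:

    #include "arm_compute/core/PixelValue.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEFill.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void fill_sketch()
    {
        Tensor t{};
        t.allocator()->init(TensorInfo(TensorShape(8U, 8U), 1, DataType::F32));
        t.allocator()->allocate();

        NEFill fill;                          // now backed by cpu::CpuFill
        fill.configure(&t, PixelValue(0.f));
        fill.run();                           // binds t as ACL_SRC_DST and runs the kernel
    }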
diff --git a/src/core/cpu/kernels/CpuFloorKernel.h b/src/core/cpu/kernels/CpuFloorKernel.h
index 25d78c7..2680871 100644
--- a/src/core/cpu/kernels/CpuFloorKernel.h
+++ b/src/core/cpu/kernels/CpuFloorKernel.h
@@ -39,7 +39,7 @@
 public:
     CpuFloorKernel() = default;
     ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuFloorKernel);
-    /** Set the source, destination of the kernel
+    /** Configure kernel for a given list of arguments
      *
      * @param[in]  src Source tensor. Data type supported: F16/F32.
      * @param[out] dst Destination tensor. Same as @p src
diff --git a/src/core/cpu/kernels/CpuPermuteKernel.cpp b/src/core/cpu/kernels/CpuPermuteKernel.cpp
new file mode 100644
index 0000000..e3055f5
--- /dev/null
+++ b/src/core/cpu/kernels/CpuPermuteKernel.cpp
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuPermuteKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+namespace
+{
+#include "src/core/NEON/kernels/convolution/common/shims.hpp"
+} // namespace
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+inline bool is_permutation_supported(const PermutationVector &v)
+{
+    static const std::array<PermutationVector, 2> permutations2 =
+    {
+        {
+            PermutationVector(0U, 1U),
+            PermutationVector(1U, 0U),
+        }
+    };
+    static const std::array<PermutationVector, 6> permutations3 =
+    {
+        {
+            PermutationVector(2U, 0U, 1U),
+            PermutationVector(1U, 2U, 0U),
+            PermutationVector(0U, 1U, 2U),
+            PermutationVector(0U, 2U, 1U),
+            PermutationVector(1U, 0U, 2U),
+            PermutationVector(2U, 1U, 0U),
+        }
+    };
+    static const std::array<PermutationVector, 24> permutations4 =
+    {
+        {
+            PermutationVector(0U, 1U, 2U, 3U),
+            PermutationVector(1U, 0U, 2U, 3U),
+            PermutationVector(2U, 0U, 1U, 3U),
+            PermutationVector(0U, 2U, 1U, 3U),
+            PermutationVector(1U, 2U, 0U, 3U),
+            PermutationVector(2U, 1U, 0U, 3U),
+            PermutationVector(2U, 1U, 3U, 0U),
+            PermutationVector(1U, 2U, 3U, 0U),
+            PermutationVector(3U, 2U, 1U, 0U),
+            PermutationVector(2U, 3U, 1U, 0U),
+            PermutationVector(1U, 3U, 2U, 0U),
+            PermutationVector(3U, 1U, 2U, 0U),
+            PermutationVector(3U, 0U, 2U, 1U),
+            PermutationVector(0U, 3U, 2U, 1U),
+            PermutationVector(2U, 3U, 0U, 1U),
+            PermutationVector(3U, 2U, 0U, 1U),
+            PermutationVector(0U, 2U, 3U, 1U),
+            PermutationVector(2U, 0U, 3U, 1U),
+            PermutationVector(1U, 0U, 3U, 2U),
+            PermutationVector(0U, 1U, 3U, 2U),
+            PermutationVector(3U, 1U, 0U, 2U),
+            PermutationVector(1U, 3U, 0U, 2U),
+            PermutationVector(0U, 3U, 1U, 2U),
+            PermutationVector(3U, 0U, 1U, 2U)
+        }
+    };
+
+    return (permutations2.end() != std::find(permutations2.begin(), permutations2.end(), v)) || (permutations3.end() != std::find(permutations3.begin(), permutations3.end(), v))
+           || (permutations4.end() != std::find(permutations4.begin(), permutations4.end(), v));
+}
+
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!is_permutation_supported(perm), "PermutationVector not supported.");
+
+    const TensorShape dst_shape = misc::shape_calculator::compute_permutation_output_shape(*src, perm);
+
+    // Validate configured destination
+    if(dst->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), dst_shape);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+    }
+
+    return Status{};
+}
+
+template <typename T>
+void run_permute(const Window &window, const ITensor *src, const ITensor *dst, const PermutationVector &perm)
+{
+    const DataLayout src_layout = src->info()->data_layout();
+
+    // Source window
+    Window window_src = window;
+
+    // we only support these two configs in src/core/NEON/kernels/convolution/common/shims.hpp, for all others
+    // we have to fall back to C++
+    if((src_layout == DataLayout::NCHW && perm == PermutationVector{ 2U, 0U, 1U }) || (src_layout == DataLayout::NHWC && perm == PermutationVector{ 1U, 2U, 0U }))
+    {
+        window_src.set(Window::DimX, Window::Dimension(window.x().start(), window.x().end(), window.x().end() - window.x().start()));
+        window_src.set(Window::DimY, Window::Dimension(window.y().start(), window.y().end(), window.y().end() - window.y().start()));
+        window_src.set(Window::DimZ, Window::Dimension(window.z().start(), window.z().end(), window.z().end() - window.z().start()));
+        window_src.set(3, Window::Dimension(window[3].start(), window[3].end(), window[3].end() - window[3].start()));
+    }
+
+    // Destination window
+    Window                  window_dst(window);
+    const Window::Dimension zero_window = Window::Dimension(0, 0, 0);
+    for(size_t d = 0; d <= dst->info()->num_dimensions(); ++d)
+    {
+        window_dst.set(d, zero_window);
+    }
+
+    // Create iterators
+    Iterator src_it(src, window_src);
+    Iterator dst_it(dst, window_dst);
+
+    int in_row_stride     = 0;
+    int in_col_stride     = 0;
+    int in_channel_stride = 0;
+    int in_batch_stride   = 0;
+    int n_cols            = 0;
+    int n_rows            = 0;
+    int n_channels        = 0;
+    int n_batches         = 0;
+
+    switch(src_layout)
+    {
+        case DataLayout::NCHW:
+        {
+            in_row_stride     = src->info()->strides_in_bytes().y() / sizeof(T);
+            in_channel_stride = src->info()->strides_in_bytes().z() / sizeof(T);
+            in_batch_stride   = src->info()->strides_in_bytes()[3] / sizeof(T);
+            n_cols            = src->info()->tensor_shape().x();
+            n_rows            = window_src.y().step();
+            n_channels        = src->info()->tensor_shape().z();
+            n_batches         = src->info()->tensor_shape()[3];
+            break;
+        }
+        case DataLayout::NHWC:
+        {
+            in_col_stride   = src->info()->strides_in_bytes().y() / sizeof(T);
+            in_row_stride   = src->info()->strides_in_bytes().z() / sizeof(T);
+            in_batch_stride = src->info()->strides_in_bytes()[3] / sizeof(T);
+            n_channels      = src->info()->tensor_shape().x();
+            n_cols          = window_src.y().step();
+            n_rows          = src->info()->tensor_shape().z();
+            n_batches       = src->info()->tensor_shape()[3];
+            break;
+        }
+        default:
+        {
+            ARM_COMPUTE_ERROR("Invalid source data layout.");
+            break;
+        }
+    }
+
+    // CHW -> HWC
+    if(src_layout == DataLayout::NCHW && perm == PermutationVector{ 2U, 0U, 1U })
+    {
+        const int out_channel_stride = dst->info()->strides_in_bytes().x() / sizeof(T);
+        const int out_col_stride     = dst->info()->strides_in_bytes().y() / sizeof(T);
+        const int out_row_stride     = dst->info()->strides_in_bytes().z() / sizeof(T);
+        const int out_batch_stride   = dst->info()->strides_in_bytes()[3] / sizeof(T);
+        execute_window_loop(window_src, [&](const Coordinates & id)
+        {
+            const int idx = id[0] * out_col_stride + id[1] * out_row_stride + id[2] * out_channel_stride;
+            reorder::nchw_to_nhwc(reinterpret_cast<const T *>(src_it.ptr()), reinterpret_cast<T *>(dst_it.ptr()) + idx,
+                                  n_batches, n_channels, n_rows, n_cols,
+                                  in_batch_stride, in_channel_stride, in_row_stride,
+                                  out_batch_stride, out_row_stride, out_col_stride);
+        },
+        src_it, dst_it);
+    }
+    // HWC -> CHW
+    else if(src_layout == DataLayout::NHWC && perm == PermutationVector{ 1U, 2U, 0U })
+    {
+        const int out_col_stride     = dst->info()->strides_in_bytes().x() / sizeof(T);
+        const int out_row_stride     = dst->info()->strides_in_bytes().y() / sizeof(T);
+        const int out_channel_stride = dst->info()->strides_in_bytes().z() / sizeof(T);
+        const int out_batch_stride   = dst->info()->strides_in_bytes()[3] / sizeof(T);
+        execute_window_loop(window_src, [&](const Coordinates & id)
+        {
+            const int idx = id[0] * out_channel_stride + id[1] * out_col_stride + id[2] * out_row_stride;
+            reorder::nhwc_to_nchw(reinterpret_cast<const T *>(src_it.ptr()), reinterpret_cast<T *>(dst_it.ptr()) + idx,
+                                  n_batches, n_rows, n_cols, n_channels,
+                                  in_batch_stride, in_row_stride, in_col_stride,
+                                  out_batch_stride, out_channel_stride, out_row_stride);
+        },
+        src_it, dst_it);
+    }
+    else
+    {
+        // All other cases fall back to C++
+        // Permute strides
+        Strides strides      = dst->info()->strides_in_bytes();
+        Strides perm_strides = strides;
+        permute_strides(perm_strides, perm);
+        const int perm_stride_3 = src->info()->num_dimensions() >= 4 ? perm_strides[3] : 0;
+        execute_window_loop(window, [&](const Coordinates & id)
+        {
+            const int idx                                = id[0] * perm_strides[0] + id[1] * perm_strides[1] + id[2] * perm_strides[2] + id[3] * perm_stride_3;
+            *(reinterpret_cast<T *>(dst_it.ptr() + idx)) = *(reinterpret_cast<const T *>(src_it.ptr()));
+        },
+        src_it, dst_it);
+    }
+}
+} // namespace
+
+void CpuPermuteKernel::configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    const TensorShape dst_shape = misc::shape_calculator::compute_permutation_output_shape(*src, perm);
+    // Destination auto initialization if not yet initialized
+    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(dst_shape));
+
+    // Perform validation step
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, perm));
+
+    _perm = perm;
+
+    // Configure kernel window
+    Window win = calculate_max_window(*src, Steps());
+
+    // This kernel doesn't need padding so update_window_and_padding() can be skipped
+    Coordinates coord;
+    coord.set_num_dimensions(dst->num_dimensions());
+    dst->set_valid_region(ValidRegion(coord, dst->tensor_shape()));
+
+    ICpuKernel::configure(win);
+}
+
+Status CpuPermuteKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, perm));
+    return Status{};
+}
+
+void CpuPermuteKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+
+    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    auto       dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    switch(src->info()->element_size())
+    {
+        case 1:
+            run_permute<uint8_t>(window, src, dst, _perm);
+            break;
+        case 2:
+            run_permute<uint16_t>(window, src, dst, _perm);
+            break;
+        case 4:
+            run_permute<uint32_t>(window, src, dst, _perm);
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Element size not supported");
+            break;
+    }
+}
+
+const char *CpuPermuteKernel::name() const
+{
+    return "CpuPermuteKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/core/cpu/kernels/CpuPermuteKernel.h b/src/core/cpu/kernels/CpuPermuteKernel.h
new file mode 100644
index 0000000..9c59d5b
--- /dev/null
+++ b/src/core/cpu/kernels/CpuPermuteKernel.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_PERMUTE_KERNEL_H
+#define ARM_COMPUTE_CPU_PERMUTE_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Kernel to perform tensor permutation given a permutation vector */
+class CpuPermuteKernel : public ICpuKernel
+{
+public:
+    CpuPermuteKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuPermuteKernel);
+    /** Configure kernel for a given list of arguments
+     *
+     * @note Arbitrary permutation vectors are supported with rank not greater than 4
+     *
+     * @param[in]  src  Source tensor to permute. Data types supported: All
+     * @param[out] dst  Destination tensor. Data types supported: Same as @p src
+     * @param[in]  perm Permutation vector
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuPermuteKernel
+     *
+     * @note Arbitrary permutation vectors are supported with rank not greater than 4
+     *
+     * @param[in] src  Source tensor to permute. Data types supported: All
+     * @param[in] dst  Destination tensor. Data types supported: Same as @p src
+     * @param[in] perm Permutation vector
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+
+private:
+    PermutationVector _perm{};
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_PERMUTE_KERNEL_H */
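As the implementation above shows, PermutationVector(2U, 0U, 1U) on an NCHW source is the CHW -> HWC case that takes the shims.hpp fast path in run_permute(); every other supported vector of rank up to 4 goes through the generic strided loop. A sketch using the ported NEPermute function, with an illustrative shape:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/runtime/NEON/functions/NEPermute.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void permute_sketch()
    {
        // Shape is given as (W, H, C): a 4x3 plane with 2 channels, NCHW layout
        Tensor src{}, dst{};
        src.allocator()->init(TensorInfo(TensorShape(4U, 3U, 2U), 1, DataType::F32));

        NEPermute permute;
        permute.configure(&src, &dst, PermutationVector(2U, 0U, 1U)); // CHW -> HWC

        src.allocator()->allocate();
        dst.allocator()->allocate(); // dst shape is auto-initialised by configure()
        permute.run();
    }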
diff --git a/src/core/cpu/kernels/CpuReshapeKernel.cpp b/src/core/cpu/kernels/CpuReshapeKernel.cpp
new file mode 100644
index 0000000..068f5d0
--- /dev/null
+++ b/src/core/cpu/kernels/CpuReshapeKernel.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/core/cpu/kernels/CpuReshapeKernel.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/IAccessWindow.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/AccessWindowStatic.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/NEON/INEKernel.h"
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/core/helpers/WindowHelpers.h"
+
+#include <cstdint>
+
+/** [NEReshapeLayerKernel Kernel] **/
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    // Note: ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(src) is not needed here as this kernel doesn't use NEON FP16 instructions.
+    ARM_COMPUTE_RETURN_ERROR_ON(src->data_type() == DataType::UNKNOWN);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
+    ARM_COMPUTE_RETURN_ERROR_ON(src->tensor_shape().total_size() != dst->tensor_shape().total_size());
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
+
+    return Status{};
+}
+
+template <typename T>
+inline void reshape_tensor(const Window &window, const ITensor *src, ITensor *dst)
+{
+    const TensorShape &src_shape = src->info()->tensor_shape();
+    const TensorShape &dst_shape = dst->info()->tensor_shape();
+    Coordinates        dst_coord{};
+
+    Iterator src_it(src, window);
+
+    execute_window_loop(window, [&](const Coordinates & id)
+    {
+        dst_coord                                              = index2coords(dst_shape, coords2index(src_shape, id));
+        *reinterpret_cast<T *>(dst->ptr_to_element(dst_coord)) = *reinterpret_cast<T *>(src_it.ptr());
+    },
+    src_it);
+}
+} // namespace
+
+void CpuReshapeKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst));
+
+    // Configure kernel window
+    Window win = calculate_max_window(*src);
+
+    // Set the destination valid region
+    dst->set_valid_region(ValidRegion(Coordinates(), dst->tensor_shape()));
+
+    ICpuKernel::configure(win);
+}
+
+Status CpuReshapeKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst));
+
+    return Status{};
+}
+
+void CpuReshapeKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
+{
+    ARM_COMPUTE_UNUSED(info);
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+
+    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
+    auto       dst = tensors.get_tensor(TensorType::ACL_DST);
+
+    switch(src->info()->data_type())
+    {
+        case DataType::U8:
+        case DataType::S8:
+        case DataType::QASYMM8:
+        case DataType::QASYMM8_SIGNED:
+            reshape_tensor<uint8_t>(window, src, dst);
+            break;
+        case DataType::U16:
+        case DataType::S16:
+        case DataType::F16:
+            reshape_tensor<uint16_t>(window, src, dst);
+            break;
+        case DataType::U32:
+        case DataType::S32:
+        case DataType::F32:
+            reshape_tensor<uint32_t>(window, src, dst);
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Unsupported data type!");
+    }
+}
+
+const char *CpuReshapeKernel::name() const
+{
+    return "CpuReshapeKernel";
+}
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+/** [NEReshapeLayerKernel Kernel] **/
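reshape_tensor() above copies element by element: each source coordinate is flattened against the source shape (coords2index) and re-expanded against the destination shape (index2coords), so the copy stays correct regardless of the strides or padding of either tensor. The mapping itself, sketched as self-contained C++ with dimension 0 as the fastest-moving one (helper names are illustrative, not the ACL ones):

    #include <array>
    #include <cstddef>

    // Flatten a coordinate against a shape (dimension 0 is contiguous)
    template <std::size_t N>
    std::size_t coords_to_index(const std::array<std::size_t, N> &shape, const std::array<std::size_t, N> &coord)
    {
        std::size_t index = 0, stride = 1;
        for(std::size_t d = 0; d < N; ++d)
        {
            index  += coord[d] * stride;
            stride *= shape[d];
        }
        return index;
    }

    // Expand a flat index back into a coordinate of the given shape
    template <std::size_t N>
    std::array<std::size_t, N> index_to_coords(const std::array<std::size_t, N> &shape, std::size_t index)
    {
        std::array<std::size_t, N> coord{};
        for(std::size_t d = 0; d < N; ++d)
        {
            coord[d] = index % shape[d];
            index   /= shape[d];
        }
        return coord;
    }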
diff --git a/src/core/cpu/kernels/CpuReshapeKernel.h b/src/core/cpu/kernels/CpuReshapeKernel.h
new file mode 100644
index 0000000..add6782
--- /dev/null
+++ b/src/core/cpu/kernels/CpuReshapeKernel.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2017-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_RESHAPE_KERNEL_H
+#define ARM_COMPUTE_CPU_RESHAPE_KERNEL_H
+
+#include "src/core/common/Macros.h"
+#include "src/core/cpu/ICpuKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+namespace kernels
+{
+/** Interface for the kernel to perform tensor reshaping */
+class CpuReshapeKernel : public ICpuKernel
+{
+public:
+    CpuReshapeKernel() = default;
+    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuReshapeKernel);
+    /** Configure kernel for a given list of arguments
+     *
+     * @param[in]  src Source tensor info. Data type supported: All
+     * @param[out] dst Destination tensor info. Data type supported: Same as @p src
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuReshapeKernel
+     *
+     * @param[in] src Source tensor info. Data type supported: All
+     * @param[in] dst Destination tensor info. Data type supported: Same as @p src
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
+
+    // Inherited methods overridden:
+    void run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
+    const char *name() const override;
+};
+} // namespace kernels
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_RESHAPE_KERNEL_H */
diff --git a/src/graph/backends/NEON/NENodeValidator.cpp b/src/graph/backends/NEON/NENodeValidator.cpp
index 0f824aa..75bba4c 100644
--- a/src/graph/backends/NEON/NENodeValidator.cpp
+++ b/src/graph/backends/NEON/NENodeValidator.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -39,7 +39,6 @@
 #include "src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMTranspose1xWKernel.h"
 #include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"
-#include "src/core/NEON/kernels/NEReshapeLayerKernel.h"
 #include "src/core/NEON/kernels/NEWeightsReshapeKernel.h"
 #include "support/Cast.h"
 
diff --git a/src/runtime/NEON/functions/NECopy.cpp b/src/runtime/NEON/functions/NECopy.cpp
index 11707cb..20642b5 100644
--- a/src/runtime/NEON/functions/NECopy.cpp
+++ b/src/runtime/NEON/functions/NECopy.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,23 +23,51 @@
  */
 #include "arm_compute/runtime/NEON/functions/NECopy.h"
 
-#include "src/core/NEON/kernels/NECopyKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuCopy.h"
 
 #include <utility>
 
 namespace arm_compute
 {
-NECopy::~NECopy() = default;
+struct NECopy::Impl
+{
+    const ITensor                *src{ nullptr };
+    ITensor                      *dst{ nullptr };
+    std::unique_ptr<cpu::CpuCopy> op{ nullptr };
+};
+
+NECopy::NECopy()
+    : _impl(std::make_unique<Impl>())
+{
+}
+NECopy::NECopy(NECopy &&) = default;
+NECopy &NECopy::operator=(NECopy &&) = default;
+NECopy::~NECopy()                    = default;
 
 void NECopy::configure(ITensor *input, ITensor *output)
 {
-    auto k = std::make_unique<NECopyKernel>();
-    k->configure(input, output);
-    _kernel = std::move(k);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    _impl->src = input;
+    _impl->dst = output;
+    _impl->op  = std::make_unique<cpu::CpuCopy>();
+    _impl->op->configure(input->info(), output->info());
 }
 
-Status NECopy::validate(const arm_compute::ITensorInfo *input, const arm_compute::ITensorInfo *output)
+Status NECopy::validate(const ITensorInfo *input, const ITensorInfo *output)
 {
-    return NECopyKernel::validate(input, output);
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuCopy::validate(input, output));
+
+    return Status{};
+}
+
+void NECopy::run()
+{
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
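All the ported runtime functions follow the same shape: the ITensor-based public interface is kept, a small Impl struct owns the user tensors plus the state-less cpu operator, and run() packs the tensors and forwards to the operator. From the caller's side nothing changes; a sketch:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/NEON/functions/NECopy.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void necopy_sketch(Tensor &src, Tensor &dst)
    {
        // validate() now forwards to cpu::CpuCopy::validate()
        ARM_COMPUTE_ERROR_THROW_ON(NECopy::validate(src.info(), dst.info()));

        NECopy copy;
        copy.configure(&src, &dst);
        copy.run(); // builds an ITensorPack with ACL_SRC/ACL_DST and runs cpu::CpuCopy
    }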
diff --git a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
index 60a747d..56fc2e4 100644
--- a/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
+++ b/src/runtime/NEON/functions/NEFFTConvolutionLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,7 +27,6 @@
 #include "arm_compute/core/Utils.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/NEON/kernels/NECopyKernel.h"
 #include "src/core/NEON/kernels/NEFFTDigitReverseKernel.h"
 #include "src/core/NEON/kernels/NEFFTRadixStageKernel.h"
 #include "src/core/NEON/kernels/NEFFTScaleKernel.h"
diff --git a/src/runtime/NEON/functions/NEFill.cpp b/src/runtime/NEON/functions/NEFill.cpp
index 74e366a..ee539fd 100644
--- a/src/runtime/NEON/functions/NEFill.cpp
+++ b/src/runtime/NEON/functions/NEFill.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,18 +23,40 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEFill.h"
 
-#include "arm_compute/core/Window.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEMemsetKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuFill.h"
 
 #include <utility>
 
 namespace arm_compute
 {
+struct NEFill::Impl
+{
+    ITensor                      *tensor{ nullptr };
+    std::unique_ptr<cpu::CpuFill> op{ nullptr };
+};
+
+NEFill::NEFill()
+    : _impl(std::make_unique<Impl>())
+{
+}
+NEFill::NEFill(NEFill &&) = default;
+NEFill &NEFill::operator=(NEFill &&) = default;
+NEFill::~NEFill()                    = default;
+
 void NEFill::configure(ITensor *tensor, PixelValue constant_value)
 {
-    auto k = std::make_unique<NEMemsetKernel>();
-    k->configure(tensor, constant_value);
-    _kernel = std::move(k);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(tensor);
+
+    _impl->tensor = tensor;
+    _impl->op     = std::make_unique<cpu::CpuFill>();
+    _impl->op->configure(tensor->info(), constant_value);
+}
+
+void NEFill::run()
+{
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC_DST, _impl->tensor);
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEGenerateProposalsLayer.cpp b/src/runtime/NEON/functions/NEGenerateProposalsLayer.cpp
index f3a6a30..931fdb2 100644
--- a/src/runtime/NEON/functions/NEGenerateProposalsLayer.cpp
+++ b/src/runtime/NEON/functions/NEGenerateProposalsLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -25,7 +25,6 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NECopyKernel.h"
 #include "src/core/NEON/kernels/NEFillBorderKernel.h"
 #include "src/core/NEON/kernels/NEGenerateProposalsLayerKernel.h"
 #include "src/core/NEON/kernels/NEPadLayerKernel.h"
diff --git a/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp b/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
index da6260b..656777d 100644
--- a/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
+++ b/src/runtime/NEON/functions/NEMaxUnpoolingLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020 Arm Limited.
+ * Copyright (c) 2020-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,26 +24,26 @@
 #include "arm_compute/runtime/NEON/functions/NEMaxUnpoolingLayer.h"
 
 #include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "arm_compute/runtime/NEON/functions/NEFill.h"
 #include "src/core/NEON/kernels/NEMaxUnpoolingLayerKernel.h"
-#include "src/core/NEON/kernels/NEMemsetKernel.h"
 
 namespace arm_compute
 {
 NEMaxUnpoolingLayer::~NEMaxUnpoolingLayer() = default;
 
 NEMaxUnpoolingLayer::NEMaxUnpoolingLayer()
-
-    : _memset_kernel(), _unpooling_layer_kernel()
+    : _fill_func(), _unpooling_layer_kernel()
 {
 }
 
 void NEMaxUnpoolingLayer::configure(ITensor *input, ITensor *indices, ITensor *output, const PoolingLayerInfo &pool_info)
 {
     const PixelValue zero_value(0.f);
-    _memset_kernel          = std::make_unique<NEMemsetKernel>();
+    _fill_func              = std::make_unique<NEFill>();
     _unpooling_layer_kernel = std::make_unique<NEMaxUnpoolingLayerKernel>();
-    _memset_kernel->configure(output, zero_value);
+    _fill_func->configure(output, zero_value);
     _unpooling_layer_kernel->configure(input, indices, output, pool_info);
 }
 
@@ -54,7 +54,7 @@
 
 void NEMaxUnpoolingLayer::run()
 {
-    NEScheduler::get().schedule(_memset_kernel.get(), Window::DimY);
+    _fill_func->run();
     NEScheduler::get().schedule(_unpooling_layer_kernel.get(), Window::DimY);
 }
 } /* namespace arm_compute */
diff --git a/src/runtime/NEON/functions/NEPadLayer.cpp b/src/runtime/NEON/functions/NEPadLayer.cpp
index 88a73b8..531b06d 100644
--- a/src/runtime/NEON/functions/NEPadLayer.cpp
+++ b/src/runtime/NEON/functions/NEPadLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -27,7 +27,6 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "src/core/NEON/kernels/NECopyKernel.h"
 #include "src/core/NEON/kernels/NEPadLayerKernel.h"
 #include "src/core/helpers/AutoConfiguration.h"
 
@@ -52,7 +51,7 @@
 NEPadLayer::~NEPadLayer() = default;
 
 NEPadLayer::NEPadLayer()
-    : _copy_kernel(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
+    : _copy_function(), _pad_kernel(), _mode(), _padding(), _num_dimensions(0), _slice_functions(), _concat_functions(), _slice_results(), _concat_results()
 {
 }
 
@@ -200,8 +199,7 @@
     else
     {
         // Copy the input to the whole output if no padding is applied
-        _copy_kernel = std::make_unique<NECopyKernel>();
-        _copy_kernel->configure(input, output);
+        _copy_function.configure(input, output);
     }
 }
 
@@ -286,7 +284,7 @@
     }
     else
     {
-        NEScheduler::get().schedule(_copy_kernel.get(), Window::DimY);
+        _copy_function.run();
     }
 }
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NEPermute.cpp b/src/runtime/NEON/functions/NEPermute.cpp
index cceb22f..257c1a2 100644
--- a/src/runtime/NEON/functions/NEPermute.cpp
+++ b/src/runtime/NEON/functions/NEPermute.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,19 +23,52 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEPermute.h"
 
-#include "src/core/NEON/kernels/NEPermuteKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "src/runtime/cpu/operators/CpuPermute.h"
 
 namespace arm_compute
 {
+struct NEPermute::Impl
+{
+    const ITensor                   *src{ nullptr };
+    ITensor                         *dst{ nullptr };
+    std::unique_ptr<cpu::CpuPermute> op{ nullptr };
+};
+
+NEPermute::NEPermute()
+    : _impl(std::make_unique<Impl>())
+{
+}
+
+NEPermute::NEPermute(NEPermute &&) = default;
+
+NEPermute &NEPermute::operator=(NEPermute &&) = default;
+
+NEPermute::~NEPermute() = default;
+
 void NEPermute::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
 {
-    auto k = std::make_unique<NEPermuteKernel>();
-    k->configure(input, output, perm);
-    _kernel = std::move(k);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
+    _impl->src = input;
+    _impl->dst = output;
+    _impl->op  = std::make_unique<cpu::CpuPermute>();
+    _impl->op->configure(input->info(), output->info(), perm);
 }
 
 Status NEPermute::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
 {
-    return NEPermuteKernel::validate(input, output, perm);
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuPermute::validate(input, output, perm));
+
+    return Status{};
+}
+
+void NEPermute::run()
+{
+    ITensorPack pack;
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+    _impl->op->run(pack);
 }
 } // namespace arm_compute
diff --git a/src/runtime/NEON/functions/NERNNLayer.cpp b/src/runtime/NEON/functions/NERNNLayer.cpp
index 93e37cc..63e8103 100644
--- a/src/runtime/NEON/functions/NERNNLayer.cpp
+++ b/src/runtime/NEON/functions/NERNNLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,7 +32,6 @@
 #include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "src/core/NEON/kernels/NEConvertFullyConnectedWeightsKernel.h"
 #include "src/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
-#include "src/core/NEON/kernels/NECopyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMInterleave4x4Kernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"
 #include "src/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
@@ -47,7 +46,7 @@
 NERNNLayer::~NERNNLayer() = default;
 
 NERNNLayer::NERNNLayer(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_f(), _activation(), _fully_connected(memory_manager), _copy_kernel(), _fully_connected_out(), _gemm_output(), _add_output(),
+    : _memory_group(std::move(memory_manager)), _gemm_state_f(), _add_f(), _activation(), _fully_connected(memory_manager), _copy_f(), _fully_connected_out(), _gemm_output(), _add_output(),
       _is_prepared(false)
 {
 }
@@ -112,8 +111,7 @@
     _activation.configure(&_add_output, hidden_state, info);
     _add_output.allocator()->allocate();
 
-    _copy_kernel = std::make_unique<NECopyKernel>();
-    _copy_kernel->configure(hidden_state, output);
+    _copy_f.configure(hidden_state, output);
 }
 
 void NERNNLayer::run()
@@ -130,7 +128,7 @@
     _activation.run();
 
     // copy hidden out to output
-    NEScheduler::get().schedule(_copy_kernel.get(), Window::DimY);
+    _copy_f.run();
 }
 
 void NERNNLayer::prepare()
diff --git a/src/runtime/NEON/functions/NEReshapeLayer.cpp b/src/runtime/NEON/functions/NEReshapeLayer.cpp
index 9ad6a35..c0c78ea 100644
--- a/src/runtime/NEON/functions/NEReshapeLayer.cpp
+++ b/src/runtime/NEON/functions/NEReshapeLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,61 +24,41 @@
 #include "arm_compute/runtime/NEON/functions/NEReshapeLayer.h"
 
 #include "arm_compute/core/Validate.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "arm_compute/runtime/Types.h"
-#include "src/core/NEON/kernels/NEReshapeLayerKernel.h"
+#include "src/runtime/cpu/operators/CpuReshape.h"
 
 #include <utility>
 
 namespace arm_compute
 {
-namespace experimental
-{
-NEReshape::~NEReshape() = default;
-
-void NEReshape::configure(const ITensorInfo *input, ITensorInfo *output)
-{
-    auto k = std::make_unique<NEReshapeLayerKernel>();
-    k->configure(input, output);
-    _kernel = std::move(k);
-}
-
-Status NEReshape::validate(const ITensorInfo *input, const ITensorInfo *output)
-{
-    return arm_compute::NEReshapeLayerKernel::validate(input, output);
-}
-} // namespace experimental
-
 struct NEReshapeLayer::Impl
 {
-    const ITensor                           *src{ nullptr };
-    ITensor                                 *dst{ nullptr };
-    std::unique_ptr<experimental::NEReshape> op{ nullptr };
+    const ITensor                   *src{ nullptr };
+    ITensor                         *dst{ nullptr };
+    std::unique_ptr<cpu::CpuReshape> op{ nullptr };
 };
 
 NEReshapeLayer::NEReshapeLayer()
     : _impl(std::make_unique<Impl>())
 {
 }
-
 NEReshapeLayer::NEReshapeLayer(NEReshapeLayer &&) = default;
-
 NEReshapeLayer &NEReshapeLayer::operator=(NEReshapeLayer &&) = default;
-
-NEReshapeLayer::~NEReshapeLayer() = default;
+NEReshapeLayer::~NEReshapeLayer()                            = default;
 
 void NEReshapeLayer::configure(const ITensor *input, ITensor *output)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+
     _impl->src = input;
     _impl->dst = output;
-    _impl->op  = std::make_unique<experimental::NEReshape>();
+    _impl->op  = std::make_unique<cpu::CpuReshape>();
     _impl->op->configure(input->info(), output->info());
 }
 
 Status NEReshapeLayer::validate(const ITensorInfo *input, const ITensorInfo *output)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ON_ERROR(experimental::NEReshape::validate(input, output));
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuReshape::validate(input, output));
 
     return Status{};
 }
diff --git a/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp b/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
index 10b3841..e8a8424 100644
--- a/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
+++ b/src/runtime/NEON/functions/NESpaceToBatchLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,7 +29,7 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEMemsetKernel.h"
+#include "arm_compute/runtime/NEON/functions/NEFill.h"
 #include "src/core/NEON/kernels/NESpaceToBatchLayerKernel.h"
 
 namespace arm_compute
@@ -37,7 +37,7 @@
 NESpaceToBatchLayer::~NESpaceToBatchLayer() = default;
 
 NESpaceToBatchLayer::NESpaceToBatchLayer()
-    : _space_to_batch_kernel(), _memset_kernel(), _has_padding(false)
+    : _space_to_batch_kernel(), _fill_f(), _has_padding(false)
 {
 }
 
@@ -47,9 +47,9 @@
 
     if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
     {
-        _has_padding   = true;
-        _memset_kernel = std::make_unique<NEMemsetKernel>();
-        _memset_kernel->configure(output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
+        _has_padding = true;
+        _fill_f      = std::make_unique<NEFill>();
+        _fill_f->configure(output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
     }
     _space_to_batch_kernel = std::make_unique<NESpaceToBatchLayerKernel>();
     _space_to_batch_kernel->configure(input, block_shape, paddings, output);
@@ -61,9 +61,9 @@
 
     if(input->info()->tensor_shape().total_size() != output->info()->tensor_shape().total_size())
     {
-        _has_padding   = true;
-        _memset_kernel = std::make_unique<NEMemsetKernel>();
-        _memset_kernel->configure(output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
+        _has_padding = true;
+        _fill_f      = std::make_unique<NEFill>();
+        _fill_f->configure(output, PixelValue(0, input->info()->data_type(), input->info()->quantization_info()));
     }
     _space_to_batch_kernel = std::make_unique<NESpaceToBatchLayerKernel>();
     _space_to_batch_kernel->configure(input, block_shape_x, block_shape_y, padding_left, padding_right, output);
@@ -89,7 +89,7 @@
     // Zero out output only if we have paddings
     if(_has_padding)
     {
-        NEScheduler::get().schedule(_memset_kernel.get(), Window::DimY);
+        _fill_f->run();
     }
     NEScheduler::get().schedule(_space_to_batch_kernel.get(), Window::DimY);
 }
diff --git a/src/runtime/cpu/operators/CpuActivation.h b/src/runtime/cpu/operators/CpuActivation.h
index 25bc903..a357b32 100644
--- a/src/runtime/cpu/operators/CpuActivation.h
+++ b/src/runtime/cpu/operators/CpuActivation.h
@@ -36,7 +36,7 @@
 public:
     /** Constructor */
     CpuActivation() = default;
-    /** Set the input and output tensor.
+    /** Configure operator for a given list of arguments
      *
      * @param[in]  input           Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
      * @param[out] output          Destination tensor info. Data type supported: same as @p src
diff --git a/src/runtime/cpu/operators/CpuCopy.cpp b/src/runtime/cpu/operators/CpuCopy.cpp
new file mode 100644
index 0000000..9fbe916
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuCopy.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuCopy.h"
+
+#include "src/core/cpu/kernels/CpuCopyKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuCopy::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+    auto k = std::make_unique<kernels::CpuCopyKernel>();
+    k->configure(src, dst);
+    _kernel = std::move(k);
+}
+
+Status CpuCopy::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    return kernels::CpuCopyKernel::validate(src, dst);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuCopy.h b/src/runtime/cpu/operators/CpuCopy.h
new file mode 100644
index 0000000..5764613
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuCopy.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_COPY_H
+#define ARM_COMPUTE_CPU_COPY_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref CpuCopyKernel */
+class CpuCopy : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuCopy() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * @param[in]  src Source tensor info. Data type supported: All
+     * @param[out] dst Destination tensor info. Data type supported: Same as @p src
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuCopy
+     *
+     * @param[in] src Source tensor info. Data type supported: All
+     * @param[in] dst Destination tensor info. Data type supported: Same as @p src
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_COPY_H */
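
For reference, a minimal sketch of how the new stateless operator is meant to be driven (illustrative only; src and dst stand for pre-allocated Tensor objects). The operator is configured on ITensorInfo objects and receives the actual tensors at run time through an ITensorPack, mirroring the run() methods of the ported NEON functions above:

    cpu::CpuCopy copy;
    copy.configure(src.info(), dst.info());

    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    copy.run(pack);
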
diff --git a/src/runtime/cpu/operators/CpuFill.cpp b/src/runtime/cpu/operators/CpuFill.cpp
new file mode 100644
index 0000000..081e30e
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuFill.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuFill.h"
+
+#include "src/core/cpu/kernels/CpuFillKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuFill::configure(const ITensorInfo *tensor, PixelValue constant_value)
+{
+    auto k = std::make_unique<kernels::CpuFillKernel>();
+    k->configure(tensor, constant_value);
+    _kernel = std::move(k);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuFill.h b/src/runtime/cpu/operators/CpuFill.h
new file mode 100644
index 0000000..7a75f42
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuFill.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_FILL_H
+#define ARM_COMPUTE_CPU_FILL_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref CpuFillKernel */
+class CpuFill : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuFill() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * @param[in,out] tensor         Tensor to fill. Supported data types: All
+     * @param[in]     constant_value The value used to fill the planes of the tensor
+     */
+    void configure(const ITensorInfo *tensor, PixelValue constant_value);
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_FILL_H */
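
Unlike the other ported operators, CpuFill exposes only configure() and works in place on a single tensor. A minimal usage sketch, assuming the tensor is handed over as ACL_SRC_DST and that "tensor" stands for a pre-allocated Tensor object:

    cpu::CpuFill fill;
    fill.configure(tensor.info(), PixelValue(0, tensor.info()->data_type(), tensor.info()->quantization_info()));

    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC_DST, &tensor);
    fill.run(pack);
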
diff --git a/src/runtime/cpu/operators/CpuFloor.h b/src/runtime/cpu/operators/CpuFloor.h
index 30c850a..86a01e3 100644
--- a/src/runtime/cpu/operators/CpuFloor.h
+++ b/src/runtime/cpu/operators/CpuFloor.h
@@ -36,7 +36,7 @@
 public:
     /** Constructor */
     CpuFloor() = default;
-    /** Set the input and output tensor.
+    /** Configure operator for a given list of arguments
      *
      * @param[in] src Source tensor info. Data types supported: F16/F32.
      * @param[in] dst Destination tensor info. Data type supported: same as @p src
diff --git a/src/runtime/cpu/operators/CpuPermute.cpp b/src/runtime/cpu/operators/CpuPermute.cpp
new file mode 100644
index 0000000..7fde1e3
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPermute.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuPermute.h"
+
+#include "src/core/cpu/kernels/CpuPermuteKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuPermute::configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm)
+{
+    auto k = std::make_unique<kernels::CpuPermuteKernel>();
+    k->configure(src, dst, perm);
+    _kernel = std::move(k);
+}
+
+Status CpuPermute::validate(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm)
+{
+    return kernels::CpuPermuteKernel::validate(src, dst, perm);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuPermute.h b/src/runtime/cpu/operators/CpuPermute.h
new file mode 100644
index 0000000..31ad77e
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPermute.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_PERMUTE_H
+#define ARM_COMPUTE_CPU_PERMUTE_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref CpuPermuteKernel */
+class CpuPermute : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuPermute() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * @note Arbitrary permutation vectors are supported with rank not greater than 4
+     *
+     * @param[in]  src  Source tensor to permute. Data types supported: All
+     * @param[out] dst  Destination tensor. Data types supported: Same as @p src
+     * @param[in]  perm Permutation vector
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, const PermutationVector &perm);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuPermute
+     *
+     * @note Arbitrary permutation vectors are supported with rank not greater than 4
+     *
+     * @param[in] src  Source tensor to permute. Data types supported: All
+     * @param[in] dst  Destination tensor. Data types supported: Same as @p src
+     * @param[in] perm Permutation vector
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PermutationVector &perm);
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_PERMUTE_H */
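
A minimal sketch of a rank-3 permutation, again with src and dst standing for pre-allocated Tensor objects. Shapes in this library list the fastest-changing dimension first, so an NCHW tensor has shape [W, H, C] and the usual NCHW -> NHWC rearrangement is expressed as PermutationVector(2U, 0U, 1U):

    cpu::CpuPermute permute;
    permute.configure(src.info(), dst.info(), PermutationVector(2U, 0U, 1U));

    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC, &src);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    permute.run(pack);
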
diff --git a/src/runtime/cpu/operators/CpuReshape.cpp b/src/runtime/cpu/operators/CpuReshape.cpp
new file mode 100644
index 0000000..33c9cb8
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuReshape.cpp
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuReshape.h"
+
+#include "src/core/cpu/kernels/CpuReshapeKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+void CpuReshape::configure(const ITensorInfo *src, ITensorInfo *dst)
+{
+    auto k = std::make_unique<kernels::CpuReshapeKernel>();
+    k->configure(src, dst);
+    _kernel = std::move(k);
+}
+
+Status CpuReshape::validate(const ITensorInfo *src, const ITensorInfo *dst)
+{
+    return kernels::CpuReshapeKernel::validate(src, dst);
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuReshape.h b/src/runtime/cpu/operators/CpuReshape.h
new file mode 100644
index 0000000..b718b07
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuReshape.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_RESHAPE_H
+#define ARM_COMPUTE_CPU_RESHAPE_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to run @ref CpuReshapeKernel */
+class CpuReshape : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuReshape() = default;
+    /** Configure operator for a given list of arguments
+     *
+     * @param[in]  src Source tensor info. Data type supported: All
+     * @param[out] dst Destination tensor info. Data type supported: Same as @p src
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuReshape
+     *
+     * @param[in] src Source tensor info. Data type supported: All
+     * @param[in] dst Destination tensor info. Data type supported: Same as @p src
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst);
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_RESHAPE_H */