COMPMID-3377: Add async support to NEElementwiseUnaryLayerKernel kernels/functions

Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Change-Id: I208287b44ece051e95f891d43a691cb0ac6e56c5
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3419
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/CPP/CPPScheduler.h b/arm_compute/runtime/CPP/CPPScheduler.h
index 2ccb094..2f7951e 100644
--- a/arm_compute/runtime/CPP/CPPScheduler.h
+++ b/arm_compute/runtime/CPP/CPPScheduler.h
@@ -77,7 +77,7 @@
-     * @param[in] inputs  Vector that contains the input tensors.
-     * @param[in] outputs Vector that contains the output tensors.
+     * @param[in] inputs  Map that contains the input tensors.
+     * @param[in] outputs Map that contains the output tensors.
      */
-    void schedule_op(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs) override;
+    void schedule_op(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs) override;
 
 protected:
     /** Will run the workloads in parallel using num_threads
@@ -87,7 +87,7 @@
     void run_workloads(std::vector<Workload> &workloads) override;
 
 private:
-    void schedule_common(ICPPKernel *kernel, const Hints &hints, const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs);
+    void schedule_common(ICPPKernel *kernel, const Hints &hints, const InputTensorMap &inputs, const OutputTensorMap &outputs);
     struct Impl;
     std::unique_ptr<Impl> _impl;
 };
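
A minimal usage sketch of the new map-based interface (not part of this patch): it assumes InputTensorMap/OutputTensorMap are std::map types keyed by the tensor slot constants (ACL_SRC, ACL_DST), and that the header paths and an already-configured kernel and tensors are provided by the caller; names here are illustrative only.

    #include "arm_compute/core/CPP/ICPPKernel.h"
    #include "arm_compute/core/Window.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/Scheduler.h"

    using namespace arm_compute;

    // Hypothetical helper: 'kernel', 'src' and 'dst' are assumed to have been
    // configured elsewhere (e.g. by an NEElementwiseUnaryLayer-style function).
    void run_async(ICPPKernel *kernel, const ITensor *src, ITensor *dst)
    {
        // With the new interface, tensors are passed at run time, keyed by slot,
        // instead of being cached inside the kernel at configure time.
        InputTensorMap  inputs  = { { ACL_SRC, src } };
        OutputTensorMap outputs = { { ACL_DST, dst } };

        // Dispatch through the scheduler, splitting the execution window along Y.
        Scheduler::get().schedule_op(kernel, IScheduler::Hints(Window::DimY), inputs, outputs);
    }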