Remove stale/solved TODOs

Change-Id: I5c440f4c6ca4186adcfa926e6b7d924086671f29
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5520
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/CL/cl_kernels/softmax_layer.cl b/src/core/CL/cl_kernels/softmax_layer.cl
index 01f5de4..4d2d89d 100644
--- a/src/core/CL/cl_kernels/softmax_layer.cl
+++ b/src/core/CL/cl_kernels/softmax_layer.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -306,7 +306,6 @@
     }
 #ifdef NON_MULTIPLE_OF_GRID_SIZE
     // How many work-items needed to complete the computation.
-    //TODO: Optimize this calculation (avoid %).
     int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
     if(lid < boundary_workitems)
     {
@@ -417,7 +416,6 @@
         sum1D += data;
     }
 #ifdef NON_MULTIPLE_OF_GRID_SIZE
-    //TODO: Optimize the calculation (avoid %).
     boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
     if(lid < boundary_workitems)
     {
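
Note on the TODO removed above: it asked to avoid the modulo in the boundary work-item count. For context, this is a minimal host-side sketch of what that expression computes. SRC_WIDTH, GRID_SIZE and VECTOR_SIZE are the kernel's compile-time defines; the concrete values below are only illustrative assumptions.

#include <stdio.h>

int main(void)
{
    /* Illustrative values only; the kernel receives these as -D build options. */
    const int SRC_WIDTH   = 100; /* elements in one row                        */
    const int VECTOR_SIZE = 4;   /* elements processed per work-item per pass  */
    const int GRID_SIZE   = 8;   /* work-items cooperating on one row          */

    /* Elements left over after the full GRID_SIZE * VECTOR_SIZE passes,
     * expressed as the number of work-items that must do one extra vector. */
    int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;

    /* 100 % 32 = 4 leftover elements, 4 / 4 = 1 extra work-item. */
    printf("boundary_workitems = %d\n", boundary_workitems);
    return 0;
}
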
diff --git a/src/core/CL/cl_kernels/softmax_layer_quantized.cl b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
index b7a6e00..4d5006d 100644
--- a/src/core/CL/cl_kernels/softmax_layer_quantized.cl
+++ b/src/core/CL/cl_kernels/softmax_layer_quantized.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -324,7 +324,6 @@
     }
 #ifdef NON_MULTIPLE_OF_GRID_SIZE
     // How many work-items needed to complete the computation.
-    //TODO: Optimize this calculation (avoid %).
     int boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
     if(lid < boundary_workitems)
     {
@@ -429,7 +428,6 @@
         sum1D = sum1D + select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN));
     }
 #ifdef NON_MULTIPLE_OF_GRID_SIZE
-    //TODO: Optimize the calculation (avoid %).
     boundary_workitems = (SRC_WIDTH % (GRID_SIZE * VECTOR_SIZE)) / VECTOR_SIZE;
     if(lid < boundary_workitems)
     {
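
In the quantized kernel, the accumulation shown above, sum1D = sum1D + select(0, data_fp, data_diff >= (VEC_INT)(DIFF_MIN)), only adds lanes whose difference from the row maximum is at least DIFF_MIN; OpenCL's select picks the second operand in lanes where the comparison is true. A scalar C sketch of the same masking, with made-up values purely for illustration:

#include <stdio.h>

#define VEC_SIZE 4
#define DIFF_MIN (-10)  /* illustrative threshold; the kernel gets it as a define */

int main(void)
{
    /* data_diff: input minus the row maximum; data_fp: exponential of the scaled
     * difference. Both arrays hold made-up values just to show the masking. */
    int   data_diff[VEC_SIZE] = { 0, -3, -50, -7 };
    float data_fp[VEC_SIZE]   = { 1.0f, 0.7f, 0.0001f, 0.4f };

    float sum1D = 0.0f;
    for (int i = 0; i < VEC_SIZE; ++i)
    {
        /* Equivalent of select(0, data_fp, data_diff >= DIFF_MIN):
         * lanes too far below the maximum contribute nothing to the sum. */
        if (data_diff[i] >= DIFF_MIN)
        {
            sum1D += data_fp[i];
        }
    }
    printf("sum1D = %f\n", sum1D); /* 1.0 + 0.7 + 0.4 = 2.1 */
    return 0;
}
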
diff --git a/src/core/NEON/NEMath.inl b/src/core/NEON/NEMath.inl
index da9d038..29df543 100644
--- a/src/core/NEON/NEMath.inl
+++ b/src/core/NEON/NEMath.inl
@@ -495,7 +495,6 @@
 
 inline float16x8_t vexpq_f16(float16x8_t x)
 {
-    // TODO (COMPMID-1535) : Revisit FP16 approximations
     const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
     const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));
 
@@ -505,7 +504,6 @@
 
 inline float16x8_t vlogq_f16(float16x8_t x)
 {
-    // TODO (COMPMID-1535) : Revisit FP16 approximations
     const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
     const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));
 
@@ -515,7 +513,6 @@
 
 inline float16x8_t vpowq_f16(float16x8_t val, float16x8_t n)
 {
-    // TODO (giaiod01) - COMPMID-1535
     float32x4_t n0_f32   = vcvt_f32_f16(vget_low_f16(n));
     float32x4_t n1_f32   = vcvt_f32_f16(vget_high_f16(n));
     float32x4_t val0_f32 = vcvt_f32_f16(vget_low_f16(val));
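
The TODOs removed from NEMath.inl tracked revisiting these FP16 approximations. As the surrounding context lines show, the helpers widen each half of the float16x8_t to float32x4_t, evaluate in FP32, and narrow back. Below is a minimal standalone sketch of that widen/compute/narrow pattern; it uses a scalar expf loop as a stand-in for the library's vectorised FP32 approximation and assumes a toolchain with NEON FP16 support (e.g. -march=armv8.2-a+fp16).

#include <arm_neon.h>
#include <math.h>
#include <stdio.h>

/* Scalar stand-in for the library's vectorised FP32 exp approximation. */
static float32x4_t exp_f32(float32x4_t x)
{
    float tmp[4];
    vst1q_f32(tmp, x);
    for (int i = 0; i < 4; ++i)
    {
        tmp[i] = expf(tmp[i]);
    }
    return vld1q_f32(tmp);
}

/* Widen each FP16 half to FP32, evaluate there, then narrow back to FP16. */
static float16x8_t exp_f16(float16x8_t x)
{
    const float32x4_t x_high = vcvt_f32_f16(vget_high_f16(x));
    const float32x4_t x_low  = vcvt_f32_f16(vget_low_f16(x));

    return vcombine_f16(vcvt_f16_f32(exp_f32(x_low)), vcvt_f16_f32(exp_f32(x_high)));
}

int main(void)
{
    float16_t in[8] = { 0.f, 1.f, 2.f, 3.f, -1.f, -2.f, 0.5f, 1.5f };
    float16_t out[8];

    vst1q_f16(out, exp_f16(vld1q_f16(in)));
    for (int i = 0; i < 8; ++i)
    {
        printf("exp(%f) ~ %f\n", (float)in[i], (float)out[i]);
    }
    return 0;
}
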