COMPMID-3147: Remove padding from NEDepthConvertLayerKernel

Drop the validate_and_configure_window() helper and its padding
requirements. The kernel window is now built with Steps() over the full
tensor, the output valid region is set to the whole tensor shape in
configure(), and run() processes each row with a 16-element vectorized
main loop followed by a scalar loop for the left-over elements.

Change-Id: I0e388e9898ea82a7a1af16a987890696bb489547
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2781
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
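
A minimal standalone sketch of the loop structure the kernel now uses
(assuming an AArch64 build with <arm_neon.h>; the helper function below
and the U8 -> S16 case are illustrative only and not part of this patch):

    #include <arm_neon.h>
    #include <cstdint>

    // Convert 'len' U8 values to S16 with a left shift, without requiring
    // any tensor padding: a 16-element NEON main loop plus a scalar tail.
    void convert_u8_to_s16(const uint8_t *src, int16_t *dst, int len, int shift)
    {
        const int16x8_t b = vdupq_n_s16(static_cast<int16_t>(shift));
        int             x = 0;
        for(; x <= len - 16; x += 16)
        {
            const uint8x16_t texels_u8 = vld1q_u8(src + x);
            vst1q_s16(dst + x, vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b));
            vst1q_s16(dst + x + 8, vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b));
        }
        // Left-over elements are handled one by one, so no out-of-bounds read occurs.
        for(; x < len; ++x)
        {
            dst[x] = static_cast<int16_t>(static_cast<int32_t>(src[x]) << shift);
        }
    }

Because the tail is handled element by element, the window can be built
with Steps() (a step of one) and the AccessWindowHorizontal /
update_window_and_padding() bookkeeping removed below is no longer needed.
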
diff --git a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
index f5fb9c0..f824f7a 100644
--- a/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConvertLayerKernel.cpp
@@ -31,6 +31,7 @@
 #include "arm_compute/core/NEON/NEMath.h"
 #include "arm_compute/core/TensorInfo.h"
 #include "arm_compute/core/Validate.h"
+#include "arm_compute/core/utils/misc/SaturateCast.h"
 
 #include <arm_neon.h>
 
@@ -90,21 +91,6 @@
 
     return Status{};
 }
-
-std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
-{
-    constexpr unsigned int num_elems_processed_per_iteration = 16;
-
-    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
-
-    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
-    bool                   window_changed = update_window_and_padding(win, input_access, output_access);
-    output_access.set_valid_region(win, output->valid_region());
-
-    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
-    return std::make_pair(err, win);
-}
 } // namespace
 
 NEDepthConvertLayerKernel::NEDepthConvertLayerKernel()
@@ -127,16 +113,17 @@
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), policy, shift));
 
     // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output->info());
-    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
-    ICPPKernel::configure(win_config.second);
+    Window      win = calculate_max_window(*input->info(), Steps());
+    Coordinates coord;
+    coord.set_num_dimensions(output->info()->num_dimensions());
+    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
+
+    ICPPKernel::configure(win);
 }
 
 Status NEDepthConvertLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, ConvertPolicy policy, uint32_t shift)
 {
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, policy, shift));
-    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get()).first);
-
     return Status{};
 }
 
@@ -148,8 +135,15 @@
     ARM_COMPUTE_ERROR_ON_NULLPTR(_input, _output);
     ARM_COMPUTE_ERROR_ON(_input == _output);
 
-    Iterator input(_input, window);
-    Iterator output(_output, window);
+    const auto window_start_x = static_cast<int>(window.x().start());
+    const auto window_end_x   = static_cast<int>(window.x().end());
+    const int  window_step_x  = 16;
+
+    Window win{ window };
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    Iterator input(_input, win);
+    Iterator output(_output, win);
 
     switch(_input->info()->data_type())
     {
@@ -162,20 +156,33 @@
                 case DataType::S16:
                 {
                     /* Up-conversion QASYMM8_SIGNED -> S16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
+                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
+                        int        x          = window_start_x;
 
-                        const int16x8x2_t texels =
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            {
-                                vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                            }
-                        };
+                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
 
-                        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()), texels.val[0]);
-                        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()) + 8, texels.val[1]);
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
+                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                }
+                            };
+
+                            vst1q_s16(output_ptr + x, texels.val[0]);
+                            vst1q_s16(output_ptr + x + 8, texels.val[1]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<int16_t>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -183,22 +190,35 @@
                 case DataType::S32:
                 {
                     /* Up-conversion QASYMM8_SIGNED -> S32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
+                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
+                        int        x          = window_start_x;
 
-                        const int16x8x2_t texels =
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            {
-                                vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                            }
-                        };
+                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
 
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), vmovl_s16(vget_low_s16(texels.val[0])));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, vmovl_s16(vget_high_s16(texels.val[0])));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 8, vmovl_s16(vget_low_s16(texels.val[1])));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
+                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                }
+                            };
+
+                            vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+                            vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+                            vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+                            vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -206,21 +226,34 @@
                 case DataType::F32:
                 {
                     /* Up-conversion QASYMM8_SIGNED -> F32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
+                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
 
-                        const int16x8x2_t texels =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
+                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+
+                            const int16x8x2_t texels =
                             {
-                                vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                            }
-                        };
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                                {
+                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
+                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                }
+                            };
+                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -229,19 +262,32 @@
                 case DataType::F16:
                 {
                     /* Up-conversion QASYMM8_SIGNED -> F16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<int8_t *>(input.ptr()));
+                        const auto input_ptr  = reinterpret_cast<const int8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
+                        int        x          = window_start_x;
 
-                        const int16x8x2_t texels =
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
+                            const int8x16_t texels_s8 = vld1q_s8(input_ptr + x);
+
+                            const int16x8x2_t texels =
                             {
-                                vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
-                                vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
-                            }
-                        };
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), vcvtq_f16_s16(texels.val[0]));
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()) + 8, vcvtq_f16_s16(texels.val[1]));
+                                {
+                                    vshlq_s16(vmovl_s8(vget_low_s8(texels_s8)), b),
+                                    vshlq_s16(vmovl_s8(vget_high_s8(texels_s8)), b)
+                                }
+                            };
+                            vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
+                            vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -264,20 +310,34 @@
                 case DataType::S16:
                 {
                     /* Up-conversion U8 -> S16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
+                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());
 
-                        const int16x8x2_t texels =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            {
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                            }
-                        };
+                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
 
-                        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()), texels.val[0]);
-                        vst1q_s16(reinterpret_cast<int16_t *>(output.ptr()) + 8, texels.val[1]);
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                }
+                            };
+
+                            vst1q_s16(output_ptr + x, texels.val[0]);
+                            vst1q_s16(output_ptr + x + 8, texels.val[1]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            auto in           = static_cast<int32_t>(*(input_ptr + x));
+                            *(output_ptr + x) = in << _shift;
+                        }
                     },
                     input, output);
                     break;
@@ -285,22 +345,36 @@
                 case DataType::S32:
                 {
                     /* Up-conversion U8 -> S32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
+                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
 
-                        const int16x8x2_t texels =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            {
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                            }
-                        };
+                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
 
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), vmovl_s16(vget_low_s16(texels.val[0])));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, vmovl_s16(vget_high_s16(texels.val[0])));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 8, vmovl_s16(vget_low_s16(texels.val[1])));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                            const int16x8x2_t texels =
+                            {
+                                {
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                }
+                            };
+
+                            vst1q_s32(output_ptr + x, vmovl_s16(vget_low_s16(texels.val[0])));
+                            vst1q_s32(output_ptr + x + 4, vmovl_s16(vget_high_s16(texels.val[0])));
+                            vst1q_s32(output_ptr + x + 8, vmovl_s16(vget_low_s16(texels.val[1])));
+                            vst1q_s32(output_ptr + x + 12, vmovl_s16(vget_high_s16(texels.val[1])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            auto in           = static_cast<uint32_t>(*(input_ptr + x));
+                            *(output_ptr + x) = in << _shift;
+                        }
                     },
                     input, output);
                     break;
@@ -308,21 +382,35 @@
                 case DataType::F32:
                 {
                     /* Up-conversion U8 -> F32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
+                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
 
-                        const int16x8x2_t texels =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
+                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+
+                            const int16x8x2_t texels =
                             {
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                            }
-                        };
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                                {
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                }
+                            };
+                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[0]))));
+                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[0]))));
+                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(vmovl_s16(vget_low_s16(texels.val[1]))));
+                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(vmovl_s16(vget_high_s16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            auto in           = static_cast<uint32_t>(*(input_ptr + x));
+                            *(output_ptr + x) = static_cast<float>(in << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -331,42 +419,67 @@
                 case DataType::F16:
                 {
                     /* Up-conversion U8 -> F16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
+                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
 
-                        const int16x8x2_t texels =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
+                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
+
+                            const int16x8x2_t texels =
                             {
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
-                                vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
-                            }
-                        };
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), vcvtq_f16_s16(texels.val[0]));
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()) + 8, vcvtq_f16_s16(texels.val[1]));
+                                {
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(texels_u8))), b),
+                                    vshlq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(texels_u8))), b)
+                                }
+                            };
+                            vst1q_f16(output_ptr + x, vcvtq_f16_s16(texels.val[0]));
+                            vst1q_f16(output_ptr + x + 8, vcvtq_f16_s16(texels.val[1]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
                 }
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
                 case DataType::U16:
                 {
                     /* Up-conversion U8 -> U16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const uint8x16_t texels_u8 = vld1q_u8(input.ptr());
+                        const auto input_ptr  = reinterpret_cast<const uint8_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<uint16_t *>(output.ptr());
 
-                        const uint16x8x2_t texels =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
-                            {
-                                vshlq_u16(vmovl_u8(vget_low_u8(texels_u8)), b),
-                                vshlq_u16(vmovl_u8(vget_high_u8(texels_u8)), b)
-                            }
-                        };
+                            const uint8x16_t texels_u8 = vld1q_u8(input_ptr + x);
 
-                        vst1q_u16(reinterpret_cast<uint16_t *>(output.ptr()), texels.val[0]);
-                        vst1q_u16(reinterpret_cast<uint16_t *>(output.ptr()) + 8, texels.val[1]);
+                            const uint16x8x2_t texels =
+                            {
+                                {
+                                    vshlq_u16(vmovl_u8(vget_low_u8(texels_u8)), b),
+                                    vshlq_u16(vmovl_u8(vget_high_u8(texels_u8)), b)
+                                }
+                            };
+
+                            vst1q_u16(output_ptr + x, texels.val[0]);
+                            vst1q_u16(output_ptr + x + 8, texels.val[1]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<uint16_t>(*(input_ptr + x)) << _shift;
+                        }
                     },
                     input, output);
                     break;
@@ -387,33 +500,59 @@
                     /* Down-conversion S16 -> QASYMM8_SIGNED */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vqshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr())), b),
-                                    vqshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr()) + 8), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
 
-                            vst1q_s8(reinterpret_cast<int8_t *>(output.ptr()), vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vqshlq_s16(vld1q_s16(input_ptr + x), b),
+                                        vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                    }
+                                };
+
+                                vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(texels.val[0]), vqmovn_s16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
                     else
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr())), b),
-                                    vshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr()) + 8), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
 
-                            vst1q_s8(reinterpret_cast<int8_t *>(output.ptr()), vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vshlq_s16(vld1q_s16(input_ptr + x), b),
+                                        vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                    }
+                                };
+
+                                vst1q_s8(output_ptr + x, vcombine_s8(vmovn_s16(texels.val[0]), vmovn_s16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
@@ -426,34 +565,60 @@
                     /* Down-conversion S16 -> U8 */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vqshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr())), b),
-                                    vqshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr()) + 8), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                            vst1q_u8(output.ptr(), vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vqshlq_s16(vld1q_s16(input_ptr + x), b),
+                                        vqshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                    }
+                                };
+
+                                vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(texels.val[0]), vqmovun_s16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
                     else
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr())), b),
-                                    vshlq_s16(vld1q_s16(reinterpret_cast<int16_t *>(input.ptr()) + 8), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                            vst1q_u8(output.ptr(), vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
-                                                               vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int16x8x2_t texels =
+                                {
+                                    {
+                                        vshlq_s16(vld1q_s16(input_ptr + x), b),
+                                        vshlq_s16(vld1q_s16(input_ptr + x + 8), b)
+                                    }
+                                };
+
+                                vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(vreinterpretq_u16_s16(texels.val[0])),
+                                                                     vmovn_u16(vreinterpretq_u16_s16(texels.val[1]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
@@ -464,30 +629,43 @@
                     const int32x4_t b = vdupq_n_s32(_shift);
 
                     /* Up-conversion S16 -> S32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const int16x8x2_t texels =
-                        {
-                            {
-                                vld1q_s16(reinterpret_cast<int16_t *>(input.ptr())),
-                                vld1q_s16(reinterpret_cast<int16_t *>(input.ptr()) + 8)
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const int16_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
 
-                        const int32x4x4_t texels_s32 =
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
                         {
+                            const int16x8x2_t texels =
                             {
-                                vshlq_s32(vmovl_s16(vget_low_s16(texels.val[0])), b),
-                                vshlq_s32(vmovl_s16(vget_high_s16(texels.val[0])), b),
-                                vshlq_s32(vmovl_s16(vget_low_s16(texels.val[1])), b),
-                                vshlq_s32(vmovl_s16(vget_high_s16(texels.val[1])), b)
-                            }
-                        };
+                                {
+                                    vld1q_s16(input_ptr + x),
+                                    vld1q_s16(input_ptr + x + 8)
+                                }
+                            };
 
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), texels_s32.val[0]);
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, texels_s32.val[1]);
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 8, texels_s32.val[2]);
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 12, texels_s32.val[3]);
+                            const int32x4x4_t texels_s32 =
+                            {
+                                {
+                                    vshlq_s32(vmovl_s16(vget_low_s16(texels.val[0])), b),
+                                    vshlq_s32(vmovl_s16(vget_high_s16(texels.val[0])), b),
+                                    vshlq_s32(vmovl_s16(vget_low_s16(texels.val[1])), b),
+                                    vshlq_s32(vmovl_s16(vget_high_s16(texels.val[1])), b)
+                                }
+                            };
+
+                            vst1q_s32(output_ptr + x, texels_s32.val[0]);
+                            vst1q_s32(output_ptr + x + 4, texels_s32.val[1]);
+                            vst1q_s32(output_ptr + x + 8, texels_s32.val[2]);
+                            vst1q_s32(output_ptr + x + 12, texels_s32.val[3]);
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -508,33 +686,60 @@
                     /* Down-conversion U16 -> U8 */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const uint16x8x2_t texels =
-                            {
-                                {
-                                    vqshlq_u16(vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr())), b),
-                                    vqshlq_u16(vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr()) + 8), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                            vst1q_u8(output.ptr(), vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const uint16x8x2_t texels =
+                                {
+                                    {
+                                        vqshlq_u16(vld1q_u16(input_ptr + x), b),
+                                        vqshlq_u16(vld1q_u16(input_ptr + x + 8), b)
+                                    }
+                                };
+
+                                vst1q_u8(output_ptr + x, vcombine_u8(vqmovn_u16(texels.val[0]), vqmovn_u16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
                     else
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const uint16x8x2_t texels =
-                            {
-                                {
-                                    vshlq_u16(vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr())), b),
-                                    vshlq_u16(vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr()) + 8), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                            vst1q_u8(output.ptr(), vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const uint16x8x2_t texels =
+                                {
+                                    {
+                                        vshlq_u16(vld1q_u16(input_ptr + x), b),
+                                        vshlq_u16(vld1q_u16(input_ptr + x + 8), b)
+                                    }
+                                };
+
+                                vst1q_u8(output_ptr + x, vcombine_u8(vmovn_u16(texels.val[0]), vmovn_u16(texels.val[1])));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
@@ -545,20 +750,33 @@
                     const int32x4_t b = vdupq_n_s32(_shift);
 
                     /* Up-conversion U16 -> U32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const uint16x8x2_t texels =
-                        {
-                            {
-                                vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr())),
-                                vld1q_u16(reinterpret_cast<uint16_t *>(input.ptr()) + 8)
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const uint16_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<uint32_t *>(output.ptr());
 
-                        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr()), vshlq_u32(vmovl_u16(vget_low_u16(texels.val[0])), b));
-                        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr()) + 4, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[0])), b));
-                        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr()) + 8, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[1])), b));
-                        vst1q_u32(reinterpret_cast<uint32_t *>(output.ptr()) + 12, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[1])), b));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const uint16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_u16(input_ptr + x),
+                                    vld1q_u16(input_ptr + x + 8)
+                                }
+                            };
+
+                            vst1q_u32(output_ptr + x, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[0])), b));
+                            vst1q_u32(output_ptr + x + 4, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[0])), b));
+                            vst1q_u32(output_ptr + x + 8, vshlq_u32(vmovl_u16(vget_low_u16(texels.val[1])), b));
+                            vst1q_u32(output_ptr + x + 12, vshlq_u32(vmovl_u16(vget_high_u16(texels.val[1])), b));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<uint32_t>(*(input_ptr + x) << _shift);
+                        }
                     },
                     input, output);
                     break;
@@ -574,20 +792,34 @@
             {
                 case DataType::QASYMM8_SIGNED:
                 {
-                    const float16x8_t scale = vdupq_n_f16(1 << _shift);
+                    const float16_t   scale_s = 1 << _shift;
+                    const float16x8_t scale   = vdupq_n_f16(scale_s);
 
                     /* Up-conversion F16 -> QASYMM8_SIGNED */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float16x8x2_t texels =
-                        {
-                            {
-                                vmulq_f16(vld1q_f16(reinterpret_cast<float16_t *>(input.ptr())), scale),
-                                vmulq_f16(vld1q_f16(reinterpret_cast<float16_t *>(input.ptr()) + 8), scale),
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
 
-                        vst1q_s8(reinterpret_cast<int8_t *>(output.ptr()), vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vmulq_f16(vld1q_f16(input_ptr + x), scale),
+                                    vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
+                                }
+                            };
+
+                            vst1q_s8(output_ptr + x, vcombine_s8(vqmovn_s16(vcvtq_s16_f16(texels.val[0])), vqmovn_s16(vcvtq_s16_f16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -595,66 +827,108 @@
                 case DataType::QASYMM8:
                 case DataType::U8:
                 {
-                    const float16x8_t scale = vdupq_n_f16(1 << _shift);
+                    const float16_t   scale_s = 1 << _shift;
+                    const float16x8_t scale   = vdupq_n_f16(scale_s);
 
                     /* Up-conversion F16 -> U8 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float16x8x2_t texels =
-                        {
-                            {
-                                vmulq_f16(vld1q_f16(reinterpret_cast<float16_t *>(input.ptr())), scale),
-                                vmulq_f16(vld1q_f16(reinterpret_cast<float16_t *>(input.ptr()) + 8), scale),
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                        vst1q_u8(reinterpret_cast<uint8_t *>(output.ptr()), vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vmulq_f16(vld1q_f16(input_ptr + x), scale),
+                                    vmulq_f16(vld1q_f16(input_ptr + x + 8), scale),
+                                }
+                            };
+
+                            vst1q_u8(output_ptr + x, vcombine_u8(vqmovun_s16(vcvtq_s16_f16(texels.val[0])), vqmovun_s16(vcvtq_s16_f16(texels.val[1]))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
                 }
                 case DataType::F32:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1 << _shift);
+                    const float       scale_s = 1 << _shift;
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Up-conversion F16 -> F32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float16x8x2_t texels =
-                        {
-                            {
-                                vld1q_f16(reinterpret_cast<float16_t *>(input.ptr())),
-                                vld1q_f16(reinterpret_cast<float16_t *>(input.ptr()) + 8)
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
 
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_f16(input_ptr + x),
+                                    vld1q_f16(input_ptr + x + 8)
+                                }
+                            };
+                            vst1q_f32(output_ptr + x, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale));
+                            vst1q_f32(output_ptr + x + 4, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale));
+                            vst1q_f32(output_ptr + x + 8, vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale));
+                            vst1q_f32(output_ptr + x + 12, vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
                 }
                 case DataType::S32:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1 << _shift);
+                    const float       scale_s = 1 << _shift;
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Up-conversion F16 -> S32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float16x8x2_t texels =
-                        {
-                            {
-                                vld1q_f16(reinterpret_cast<float16_t *>(input.ptr())),
-                                vld1q_f16(reinterpret_cast<float16_t *>(input.ptr()) + 8)
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
 
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale)));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale)));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 8, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale)));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 12, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale)));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float16x8x2_t texels =
+                            {
+                                {
+                                    vld1q_f16(input_ptr + x),
+                                    vld1q_f16(input_ptr + x + 8)
+                                }
+                            };
+
+                            vst1q_s32(output_ptr + x, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[0])), scale)));
+                            vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[0])), scale)));
+                            vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_low_f16(texels.val[1])), scale)));
+                            vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(vmulq_f32(vcvt_f32_f16(vget_high_f16(texels.val[1])), scale)));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -670,23 +944,37 @@
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                 case DataType::F16:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1.f / (1 << _shift));
+                    const float       scale_s = 1.f / (1 << _shift);
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Down-conversion F32 -> F16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float32x4x4_t texels =
-                        {
-                            {
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr())), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 4), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 8), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 12), scale)
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
 
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()) + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale)
+                                }
+                            };
+
+                            vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
+                            vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -694,25 +982,39 @@
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
                 case DataType::S32:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1.f / (1 << _shift));
+                    const float       scale_s = 1.f / (1 << _shift);
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Conversion F32 -> S32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float32x4x4_t texels =
-                        {
-                            {
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr())), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 4), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 8), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 12), scale),
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int32_t *>(output.ptr());
 
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()), vcvtq_s32_f32(texels.val[0]));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 4, vcvtq_s32_f32(texels.val[1]));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 8, vcvtq_s32_f32(texels.val[2]));
-                        vst1q_s32(reinterpret_cast<int32_t *>(output.ptr()) + 12, vcvtq_s32_f32(texels.val[3]));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
+                                }
+                            };
+
+                            vst1q_s32(output_ptr + x, vcvtq_s32_f32(texels.val[0]));
+                            vst1q_s32(output_ptr + x + 4, vcvtq_s32_f32(texels.val[1]));
+                            vst1q_s32(output_ptr + x + 8, vcvtq_s32_f32(texels.val[2]));
+                            vst1q_s32(output_ptr + x + 12, vcvtq_s32_f32(texels.val[3]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<int32_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -720,46 +1022,73 @@
                 case DataType::QASYMM8:
                 case DataType::U8:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1.f / (1 << _shift));
+                    const float       scale_s = 1.f / (1 << _shift);
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Down-conversion F32 -> U8 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float32x4x4_t texels =
-                        {
-                            {
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr())), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 4), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 8), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 12), scale),
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()), vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1])))));
-                        vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()) + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3])))));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
+                                }
+                            };
+
+                            vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[0])), vqmovun_s32(vcvtq_s32_f32(texels.val[1])))));
+                            vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(vcvtq_s32_f32(texels.val[2])), vqmovun_s32(vcvtq_s32_f32(texels.val[3])))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
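+                            // saturate_cast clamps to the uint8 range, matching the saturating narrow (vqmovun/vqmovn) of the vector path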
+                            *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
                 }
                 case DataType::QASYMM8_SIGNED:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1.f / (1 << _shift));
+                    const float       scale_s = 1.f / (1 << _shift);
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Down-conversion F32 -> QASYMM8_SIGNED */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float32x4x4_t texels =
-                        {
-                            {
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr())), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 4), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 8), scale),
-                                vmulq_f32(vld1q_f32(reinterpret_cast<float *>(input.ptr()) + 12), scale),
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const float *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
 
-                        vst1_s8(reinterpret_cast<int8_t *>(output.ptr()), vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1])))));
-                        vst1_s8(reinterpret_cast<int8_t *>(output.ptr()) + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3])))));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vmulq_f32(vld1q_f32(input_ptr + x), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 4), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 8), scale),
+                                    vmulq_f32(vld1q_f32(input_ptr + x + 12), scale),
+                                }
+                            };
+
+                            vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[0])), vqmovn_s32(vcvtq_s32_f32(texels.val[1])))));
+                            vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(vcvtq_s32_f32(texels.val[2])), vqmovn_s32(vcvtq_s32_f32(texels.val[3])))));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -776,23 +1105,37 @@
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
                 case DataType::F16:
                 {
-                    const float32x4_t scale = vdupq_n_f32(1.f / (1 << _shift));
+                    const float       scale_s = 1.f / (1 << _shift);
+                    const float32x4_t scale   = vdupq_n_f32(scale_s);
 
                     /* Down-conversion S32 -> F16 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const float32x4x4_t texels =
-                        {
-                            {
-                                vmulq_f32(vcvtq_f32_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()))), scale),
-                                vmulq_f32(vcvtq_f32_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 4)), scale),
-                                vmulq_f32(vcvtq_f32_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 8)), scale),
-                                vmulq_f32(vcvtq_f32_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 12)), scale)
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());
 
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()), vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
-                        vst1q_f16(reinterpret_cast<float16_t *>(output.ptr()) + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const float32x4x4_t texels =
+                            {
+                                {
+                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x)), scale),
+                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 4)), scale),
+                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 8)), scale),
+                                    vmulq_f32(vcvtq_f32_s32(vld1q_s32(input_ptr + x + 12)), scale)
+                                }
+                            };
+
+                            vst1q_f16(output_ptr + x, vcombine_f16(vcvt_f16_f32(texels.val[0]), vcvt_f16_f32(texels.val[1])));
+                            vst1q_f16(output_ptr + x + 8, vcombine_f16(vcvt_f16_f32(texels.val[2]), vcvt_f16_f32(texels.val[3])));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float16_t>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -800,25 +1143,39 @@
 #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
                 case DataType::F32:
                 {
-                    const int32x4_t scale = vdupq_n_s32(1.f / (1 << _shift));
+                    const int       scale_s = 1.f / (1 << _shift);
+                    const int32x4_t scale   = vdupq_n_s32(scale_s);
 
                     /* Conversion S32 -> F32 */
-                    execute_window_loop(window, [&](const Coordinates &)
+                    execute_window_loop(win, [&](const Coordinates &)
                     {
-                        const int32x4x4_t texels =
-                        {
-                            {
-                                vmulq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr())), scale),
-                                vmulq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 4), scale),
-                                vmulq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 8), scale),
-                                vmulq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 12), scale),
-                            }
-                        };
+                        const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
+                        const auto output_ptr = reinterpret_cast<float *>(output.ptr());
 
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), vcvtq_f32_s32(texels.val[0]));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, vcvtq_f32_s32(texels.val[1]));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, vcvtq_f32_s32(texels.val[2]));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, vcvtq_f32_s32(texels.val[3]));
+                        int x = window_start_x;
+                        for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                        {
+                            const int32x4x4_t texels =
+                            {
+                                {
+                                    vmulq_s32(vld1q_s32(input_ptr + x), scale),
+                                    vmulq_s32(vld1q_s32(input_ptr + x + 4), scale),
+                                    vmulq_s32(vld1q_s32(input_ptr + x + 8), scale),
+                                    vmulq_s32(vld1q_s32(input_ptr + x + 12), scale),
+                                }
+                            };
+
+                            vst1q_f32(output_ptr + x, vcvtq_f32_s32(texels.val[0]));
+                            vst1q_f32(output_ptr + x + 4, vcvtq_f32_s32(texels.val[1]));
+                            vst1q_f32(output_ptr + x + 8, vcvtq_f32_s32(texels.val[2]));
+                            vst1q_f32(output_ptr + x + 12, vcvtq_f32_s32(texels.val[3]));
+                        }
+
+                        // Compute left-over elements
+                        for(; x < window_end_x; ++x)
+                        {
+                            *(output_ptr + x) = static_cast<float>(*(input_ptr + x) * scale_s);
+                        }
                     },
                     input, output);
                     break;
@@ -830,38 +1187,64 @@
                     /* Down-conversion S32 -> QASYMM8_SIGNED */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int32x4x4_t texels =
+                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
+
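+                            // SATURATE policy: vqmovn narrows with saturation, clamping out-of-range values to the int8 limits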
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
                             {
+                                const int32x4x4_t texels =
                                 {
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr())), b),
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 4), b),
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 8), b),
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 12), b)
-                                }
-                            };
-                            vst1_s8(reinterpret_cast<int8_t *>(output.ptr()), vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1]))));
-                            vst1_s8(reinterpret_cast<int8_t *>(output.ptr()) + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3]))));
+                                    {
+                                        vqshlq_s32(vld1q_s32(input_ptr + x), b),
+                                        vqshlq_s32(vld1q_s32(input_ptr + x + 4), b),
+                                        vqshlq_s32(vld1q_s32(input_ptr + x + 8), b),
+                                        vqshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+                                    }
+                                };
+                                vst1_s8(output_ptr + x, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[0]), vqmovn_s32(texels.val[1]))));
+                                vst1_s8(output_ptr + x + 8, vqmovn_s16(vcombine_s16(vqmovn_s32(texels.val[2]), vqmovn_s32(texels.val[3]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = utils::cast::saturate_cast<int8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
                     else
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int32x4x4_t texels =
-                            {
-                                {
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr())), b),
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 4), b),
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 8), b),
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 12), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
 
-                            vst1_s8(reinterpret_cast<int8_t *>(output.ptr()), vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1]))));
-                            vst1_s8(reinterpret_cast<int8_t *>(output.ptr()) + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3]))));
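+                            // WRAP policy: non-saturating shift and narrow; out-of-range values simply wrap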
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int32x4x4_t texels =
+                                {
+                                    {
+                                        vshlq_s32(vld1q_s32(input_ptr + x), b),
+                                        vshlq_s32(vld1q_s32(input_ptr + x + 4), b),
+                                        vshlq_s32(vld1q_s32(input_ptr + x + 8), b),
+                                        vshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+                                    }
+                                };
+
+                                vst1_s8(output_ptr + x, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[0]), vmovn_s32(texels.val[1]))));
+                                vst1_s8(output_ptr + x + 8, vmovn_s16(vcombine_s16(vmovn_s32(texels.val[2]), vmovn_s32(texels.val[3]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = static_cast<int8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
@@ -875,38 +1258,64 @@
                     /* Down-conversion S32 -> U8 */
                     if(ConvertPolicy::SATURATE == _policy)
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int32x4x4_t texels =
+                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
+
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
                             {
+                                const int32x4x4_t texels =
                                 {
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr())), b),
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 4), b),
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 8), b),
-                                    vqshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 12), b)
-                                }
-                            };
-                            vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()), vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1]))));
-                            vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()) + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3]))));
+                                    {
+                                        vqshlq_s32(vld1q_s32(input_ptr + x), b),
+                                        vqshlq_s32(vld1q_s32(input_ptr + x + 4), b),
+                                        vqshlq_s32(vld1q_s32(input_ptr + x + 8), b),
+                                        vqshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+                                    }
+                                };
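+                                // vqmovun_s32 narrows with unsigned saturation, so negative inputs become zero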
+                                vst1_u8(output_ptr + x, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[0]), vqmovun_s32(texels.val[1]))));
+                                vst1_u8(output_ptr + x + 8, vqmovn_u16(vcombine_u16(vqmovun_s32(texels.val[2]), vqmovun_s32(texels.val[3]))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = utils::cast::saturate_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
                     else
                     {
-                        execute_window_loop(window, [&](const Coordinates &)
+                        execute_window_loop(win, [&](const Coordinates &)
                         {
-                            const int32x4x4_t texels =
-                            {
-                                {
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr())), b),
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 4), b),
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 8), b),
-                                    vshlq_s32(vld1q_s32(reinterpret_cast<int32_t *>(input.ptr()) + 12), b)
-                                }
-                            };
+                            const auto input_ptr  = reinterpret_cast<const int32_t *>(input.ptr());
+                            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());
 
-                            vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()), vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1])))));
-                            vst1_u8(reinterpret_cast<uint8_t *>(output.ptr()) + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3])))));
+                            int x = window_start_x;
+                            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+                            {
+                                const int32x4x4_t texels =
+                                {
+                                    {
+                                        vshlq_s32(vld1q_s32(input_ptr + x), b),
+                                        vshlq_s32(vld1q_s32(input_ptr + x + 4), b),
+                                        vshlq_s32(vld1q_s32(input_ptr + x + 8), b),
+                                        vshlq_s32(vld1q_s32(input_ptr + x + 12), b)
+                                    }
+                                };
+
+                                vst1_u8(output_ptr + x, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[0])), vmovn_u32(vreinterpretq_u32_s32(texels.val[1])))));
+                                vst1_u8(output_ptr + x + 8, vmovn_u16(vcombine_u16(vmovn_u32(vreinterpretq_u32_s32(texels.val[2])), vmovn_u32(vreinterpretq_u32_s32(texels.val[3])))));
+                            }
+
+                            // Compute left-over elements
+                            for(; x < window_end_x; ++x)
+                            {
+                                *(output_ptr + x) = static_cast<uint8_t>(*(input_ptr + x) >> _shift);
+                            }
                         },
                         input, output);
                     }
diff --git a/tests/validation/NEON/Cast.cpp b/tests/validation/NEON/Cast.cpp
index 2fe4e36..fd66be3 100644
--- a/tests/validation/NEON/Cast.cpp
+++ b/tests/validation/NEON/Cast.cpp
@@ -124,22 +124,6 @@
 
 #define CAST_SUITE(NAME, idt, odt, type, dataset, tolerance)                                                                     \
     TEST_SUITE(NAME)                                                                                                             \
-    DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), datasets::ConvertPolicies()),    \
-                   shape, policy)                                                                                                \
-    {                                                                                                                            \
-        Tensor src = create_tensor<Tensor>(shape, idt, 1);                                                                       \
-        Tensor dst = create_tensor<Tensor>(shape, odt, 1);                                                                       \
-        \
-        NECast cast;                                                                                                             \
-        cast.configure(&src, &dst, policy);                                                                                      \
-        \
-        const ValidRegion valid_region = shape_to_valid_region(shape);                                                           \
-        validate(dst.info()->valid_region(), valid_region);                                                                      \
-        \
-        const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();                                         \
-        validate(src.info()->padding(), padding);                                                                                \
-        validate(dst.info()->padding(), padding);                                                                                \
-    }                                                                                                                            \
     FIXTURE_DATA_TEST_CASE(RunSmall, type, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), dataset), \
                                                                                       datasets::ConvertPolicies()))              \
     {                                                                                                                            \
diff --git a/tests/validation/NEON/DepthConvertLayer.cpp b/tests/validation/NEON/DepthConvertLayer.cpp
index a3482d1..b7de8fd 100644
--- a/tests/validation/NEON/DepthConvertLayer.cpp
+++ b/tests/validation/NEON/DepthConvertLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -44,29 +44,29 @@
 namespace
 {
 /** Input data sets **/
-const auto DepthConvertLayerQASYMM8toF16Dataset   = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::F16));
-const auto DepthConvertLayerQASYMM8toF32Dataset   = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerQASYMM8toS32Dataset   = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerU8toU16Dataset        = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
-const auto DepthConvertLayerU8toS16Dataset        = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
-const auto DepthConvertLayerU8toS32Dataset        = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerU8toF16Dataset        = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::F16));
-const auto DepthConvertLayerU8toF32Dataset        = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerU16toU8Dataset        = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerU16toU32Dataset       = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
-const auto DepthConvertLayerS16toU8Dataset        = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerS16toS32Dataset       = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerF16toU8Dataset       = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::U8));
-const auto DepthConvertLayerF16toF32Dataset       = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerF16toS32Dataset       = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerF32toF16Dataset       = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F16));
-const auto DepthConvertLayerF32toS32Dataset       = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::S32));
-const auto DepthConvertLayerF32toU8Dataset       = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerQASYMM8toF16Dataset = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::F16));
+const auto DepthConvertLayerQASYMM8toF32Dataset = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerQASYMM8toS32Dataset = combine(framework::dataset::make("DataType", DataType::QASYMM8), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerU8toU16Dataset      = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U16));
+const auto DepthConvertLayerU8toS16Dataset      = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S16));
+const auto DepthConvertLayerU8toS32Dataset      = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerU8toF16Dataset      = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::F16));
+const auto DepthConvertLayerU8toF32Dataset      = combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerU16toU8Dataset      = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerU16toU32Dataset     = combine(framework::dataset::make("DataType", DataType::U16), framework::dataset::make("DataType", DataType::U32));
+const auto DepthConvertLayerS16toU8Dataset      = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerS16toS32Dataset     = combine(framework::dataset::make("DataType", DataType::S16), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerF16toU8Dataset      = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerF16toF32Dataset     = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerF16toS32Dataset     = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerF32toF16Dataset     = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F16));
+const auto DepthConvertLayerF32toS32Dataset     = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::S32));
+const auto DepthConvertLayerF32toU8Dataset      = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::U8));
 
-const auto DepthConvertLayerS32toF32Dataset       = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F32));
-const auto DepthConvertLayerS32toQASYMM8Dataset       = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::QASYMM8));
-const auto DepthConvertLayerS32toF16Dataset       = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F16));
-const auto DepthConvertLayerS32toU8Dataset       = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::U8));
+const auto DepthConvertLayerS32toF32Dataset     = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F32));
+const auto DepthConvertLayerS32toQASYMM8Dataset = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::QASYMM8));
+const auto DepthConvertLayerS32toF16Dataset     = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::F16));
+const auto DepthConvertLayerS32toU8Dataset      = combine(framework::dataset::make("DataType", DataType::S32), framework::dataset::make("DataType", DataType::U8));
 
 const auto DepthConvertLayerF16toQASYMM8Dataset   = combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::QASYMM8));
 const auto DepthConvertLayerF32toQASYMM8Dataset   = combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::QASYMM8));
@@ -140,28 +140,6 @@
 using NEDepthConvertLayerQuantizedToS32Fixture = DepthConvertLayerValidationQuantizedFixture<Tensor, Accessor, NEDepthConvertLayer, T, int32_t>;
 
 TEST_SUITE(QASYMM8_to_F32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::QASYMM8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerQuantizedToF32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                        DepthConvertLayerQASYMM8toF32Dataset),
                        framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -183,28 +161,6 @@
 TEST_SUITE_END() // QASYMM8_to_F32
 
 TEST_SUITE(QASYMM8_to_S32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::QASYMM8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerQuantizedToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                        DepthConvertLayerQASYMM8toS32Dataset),
                        framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -226,28 +182,6 @@
 TEST_SUITE_END() // QASYMM8_to_S32
 
 TEST_SUITE(U8_to_U16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U16, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toU16Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -266,28 +200,6 @@
 TEST_SUITE_END() // U8_to_U16
 
 TEST_SUITE(U8_to_S16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S16, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS16Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -305,28 +217,6 @@
 }
 TEST_SUITE_END() // U8_to_S16
 TEST_SUITE(U8_to_S32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toS32Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -344,30 +234,7 @@
 }
 TEST_SUITE_END() // U8_to_S32
 
-
 TEST_SUITE(U8_to_F32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF32Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toF32Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -387,28 +254,6 @@
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(U8_to_F16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F16, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU8toF16Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -425,32 +270,9 @@
     validate(Accessor(_target), _reference);
 }
 TEST_SUITE_END() // U8_to_F16
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
+#endif           // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
 TEST_SUITE(U16_to_U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU8Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -468,28 +290,6 @@
 TEST_SUITE_END() // U16_to_U8
 
 TEST_SUITE(U16_to_U32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::U16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU32Fixture<uint16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerU16toU32Dataset),
                                                                                                                        framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                        DepthConvertLayerShiftDatasetPrecommit))
@@ -507,28 +307,6 @@
 TEST_SUITE_END() // U16_to_U32
 
 TEST_SUITE(S16_to_U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toU8Dataset),
                                                                                                                      framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                      DepthConvertLayerShiftDatasetPrecommit))
@@ -546,28 +324,6 @@
 TEST_SUITE_END() // S16_to_U8
 
 TEST_SUITE(S16_to_S32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerShiftDatasetNightly),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS16toS32Dataset),
                                                                                                                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                       DepthConvertLayerShiftDatasetPrecommit))
@@ -586,28 +342,6 @@
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(F16_to_QASYMM8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToQASYMM8Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                                                                                                                        DepthConvertLayerF16toQASYMM8Dataset),
                                                                                                                        framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -628,71 +362,24 @@
 }
 TEST_SUITE_END() // F16_to_QASYMM8
 
-
 TEST_SUITE(F16_to_U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerF16toU8Dataset),
-                                                                                                                   framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                   DepthConvertLayerZeroShiftDataset))
+                                                                                                                  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                  DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_one_uint8);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerF16toU8Dataset),
-                                                                                                                 framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                 DepthConvertLayerZeroShiftDataset))
+                                                                                                                                framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                                DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_one_uint8);
 }
 TEST_SUITE_END() // F16_to_U8
 
-
-
 TEST_SUITE(F16_to_F32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF32Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerF16toF32Dataset),
                                                                                                                    framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                    DepthConvertLayerZeroShiftDataset))
@@ -710,28 +397,6 @@
 TEST_SUITE_END() // F16_to_F32
 
 TEST_SUITE(F16_to_S32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F16, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerF16toS32Dataset),
                                                                                                                    framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                    DepthConvertLayerZeroShiftDataset))
@@ -750,28 +415,6 @@
 TEST_SUITE_END() // F16_to_S32
 
 TEST_SUITE(QASYMM8_to_F16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::QASYMM8, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F16, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerQuantizedToF16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                        DepthConvertLayerQASYMM8toF16Dataset),
                        framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -793,28 +436,6 @@
 TEST_SUITE_END() // QASYMM8_to_F16
 
 TEST_SUITE(F32_to_F16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F16, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF16Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerF32toF16Dataset),
                                                                                                                     framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                     DepthConvertLayerZeroShiftDataset))
@@ -832,69 +453,25 @@
 TEST_SUITE_END() // F32_to_F16
 
 TEST_SUITE(S32_to_F16)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F16, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF16Fixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS32toF16Dataset),
+                                                                                                                      framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                      DepthConvertLayerZeroShiftDataset))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToF16Fixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS32toF16Dataset),
                                                                                                                     framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                     DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToF16Fixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS32toF16Dataset),
-                                                                                                                  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                  DepthConvertLayerZeroShiftDataset))
-{
-    // Validate output
-    validate(Accessor(_target), _reference);
-}
 TEST_SUITE_END() // S32_to_F16
 
-#endif           /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
+#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 
 TEST_SUITE(F32_to_S32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::S32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToS32Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerF32toS32Dataset),
                                                                                                                     framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
                                                                                                                     DepthConvertLayerZeroShiftDataset))
@@ -912,69 +489,23 @@
 TEST_SUITE_END() // F32_to_S32
 
 TEST_SUITE(F32_to_U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerF32toU8Dataset),
-                                                                                                                    framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                    DepthConvertLayerZeroShiftDataset))
+                                                                                                                   framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                   DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_one_int32);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerF32toU8Dataset),
-                                                                                                                  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                  DepthConvertLayerZeroShiftDataset))
+                                                                                                                 framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                 DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_one_int32);
 }
 TEST_SUITE_END() // F32_to_U8
 
-
-
 TEST_SUITE(F32_to_QASYMM8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::F32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToQASYMM8Fixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
                                                                                                                         DepthConvertLayerF32toQASYMM8Dataset),
                                                                                                                         framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
@@ -995,40 +526,17 @@
 }
 TEST_SUITE_END() // F32_to_QASYMM8
 
-
 TEST_SUITE(S32_to_F32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::F32, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToF32Fixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS32toF32Dataset),
-                                                                                                                    framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                    DepthConvertLayerZeroShiftDataset))
+                                                                                                                      framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                      DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToF32Fixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS32toF32Dataset),
-                                                                                                                  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                  DepthConvertLayerZeroShiftDataset))
+                                                                                                                    framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                    DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference);
@@ -1036,29 +544,16 @@
 TEST_SUITE_END() // S32_to_F32
 
 TEST_SUITE(S32_to_QASYMM8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::QASYMM8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToQASYMM8Fixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallShapes(),
+                       DepthConvertLayerS32toQASYMM8Dataset),
+                       framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
+                       DepthConvertLayerZeroShiftDataset),
+                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qasymm8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToQASYMM8Fixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeShapes(),
                                                                                                                         DepthConvertLayerS32toQASYMM8Dataset),
                                                                                                                         framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
                                                                                                                         DepthConvertLayerZeroShiftDataset),
@@ -1067,59 +562,25 @@
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
-FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToQASYMM8Fixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(datasets::LargeShapes(),
-                                                                                                                      DepthConvertLayerS32toQASYMM8Dataset),
-                                                                                                                      framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE })),
-                                                                                                                      DepthConvertLayerZeroShiftDataset),
-                                                                                                                      framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10) })))
-{
-    // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
-}
 TEST_SUITE_END() // S32_to_QASYMM8
 
 TEST_SUITE(S32_to_U8)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                   DepthConvertLayerZeroShiftDataset),
-               shape, policy, shift)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, DataType::S32, 1);
-    Tensor dst = create_tensor<Tensor>(shape, DataType::U8, 1);
-
-    // Create and Configure function
-    NEDepthConvertLayer depth_convert;
-    depth_convert.configure(&src, &dst, policy, shift);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
 FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU8Fixture<int32_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), DepthConvertLayerS32toU8Dataset),
-                                                                                                                    framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                    DepthConvertLayerZeroShiftDataset))
+                                                                                                                     framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                     DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConvertLayerToU8Fixture<int32_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), DepthConvertLayerS32toU8Dataset),
-                                                                                                                  framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
-                                                                                                                  DepthConvertLayerZeroShiftDataset))
+                                                                                                                   framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
+                                                                                                                   DepthConvertLayerZeroShiftDataset))
 {
     // Validate output
     validate(Accessor(_target), _reference);
 }
 TEST_SUITE_END() // S32_to_U8
 
-
-
-
 TEST_SUITE_END() // DepthConvertLayer
 TEST_SUITE_END() // NEON
 } // namespace validation