COMPMID-401: Implement FixedPointPosition conversion for NEON.

Adds support for changing the fixed point position of a tensor in
DepthConvert.
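
A minimal usage sketch (tensor name and fixed point positions are
illustrative): an in-place change of the fixed point position is
requested by passing a null output tensor and the new position through
the shift parameter:

    // 'data' is an allocated and filled QS8 tensor with fixed point position 3
    NEDepthConvert convert;
    convert.configure(&data, nullptr, ConvertPolicy::SATURATE, 5 /* new fixed point position */);
    convert.run();
    // data.info()->fixed_point_position() is now 5 and the values have been rescaled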

Change-Id: Ic3b50a4628fac7497a0217d92941c9d6f64d21cb
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80438
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEDepthConvertKernel.h b/arm_compute/core/NEON/kernels/NEDepthConvertKernel.h
index 0c5c29e..ad8d152 100644
--- a/arm_compute/core/NEON/kernels/NEDepthConvertKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthConvertKernel.h
@@ -24,7 +24,7 @@
 #ifndef __ARM_COMPUTE_DEPTHCONVERTKERNEL_H__
 #define __ARM_COMPUTE_DEPTHCONVERTKERNEL_H__
 
-#include "arm_compute/core/NEON/INESimpleKernel.h"
+#include "arm_compute/core/NEON/INEKernel.h"
 #include "arm_compute/core/Types.h"
 
 #include <cstdint>
@@ -34,35 +34,52 @@
 class ITensor;
 
 /** Depth conversion kernel */
-class NEDepthConvertKernel : public INESimpleKernel
+class NEDepthConvertKernel : public INEKernel
 {
 public:
     /** Default constructor*/
     NEDepthConvertKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEDepthConvertKernel(const NEDepthConvertKernel &) = delete;
+    /** Default move constructor */
+    NEDepthConvertKernel(NEDepthConvertKernel &&) = default;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    NEDepthConvertKernel &operator=(const NEDepthConvertKernel &) = delete;
+    /** Default move assignment operator */
+    NEDepthConvertKernel &operator=(NEDepthConvertKernel &&) = default;
     /** Set the input and output of the kernel
      *
      * Valid conversions Input -> Output :
      *
-     *   - QS8 -> F32
+     *   - QS8 -> QS8, F32
      *   - U8 -> U16, S16, S32
      *   - U16 -> U8, U32
      *   - S16 -> U8, S32
+     *   - QS16 -> QS16, F32
      *   - F32 -> QS8
      *
+     * @warning In case of in-place fixed point position conversion, make sure that configure has been called
+     *          before the updated tensor is used in other functions, as the TensorInfo of the tensor will be
+     *          altered. In-place computation is only supported for QS8 -> QS8 and QS16 -> QS16.
      *
-     * @param[in]  input  The input tensor to convert. Data types supported: U8/QS8/U16/S16/F32.
-     * @param[out] output The output tensor. Data types supported: U8/QS8/U16/S16/U32/S32/F32.
-     * @param[in]  policy Conversion policy.
-     * @param[in]  shift  Value for down/up conversions. Must be 0 <= shift < 8.
+     * @param[in, out] input  The input tensor to convert (Written in case of in-place computation). Data types supported: U8/QS8/U16/S16/QS16/F32.
+     * @param[out]     output The output tensor. Can be null in case of in-place computation. Data types supported: U8/QS8/U16/S16/QS16/U32/S32/F32.
+     * @param[in]      policy Conversion policy.
+     * @param[in]      shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
+     *                        In case of fixed point position conversion, it specifies the new fixed point position if the operation is in-place.
      */
-    void configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift);
+    void configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
 
     // Inherited methods overridden:
     void run(const Window &window) override;
 
 private:
+    ITensor      *_input;
+    ITensor      *_output;
     ConvertPolicy _policy;
     uint32_t      _shift;
+    int           _fixed_point_position_input;
+    int           _fixed_point_position_output;
 };
 }
 #endif /*__ARM_COMPUTE_NEDEPTHCONVERTKERNEL_H__ */
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConvert.h b/arm_compute/runtime/NEON/functions/NEDepthConvert.h
index 47b3a7e..37f7293 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConvert.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConvert.h
@@ -45,24 +45,25 @@
     const NEDepthConvert &operator=(const NEDepthConvert &) = delete;
     /** Initialize the function's source, destination
      *
-     * Input format must be different than output format.
-     *
      * Valid conversions Input -> Output :
-     *    QS8 -> F32
+     *    QS8 -> QS8, F32
      *    U8 -> U16, S16, S32
      *    U16 -> U8, U32
      *    S16 -> U8, S32
-     *    QS16 -> F32
+     *    QS16 -> QS16, F32
      *    F32 -> QS8, QS16
      *
+     * @warning In case of in-place fixed point position conversion, make sure that configure has been called
+     *          before the updated tensor is used in other functions, as the TensorInfo of the tensor will be
+     *          altered. In-place computation is only supported for QS8 -> QS8 and QS16 -> QS16.
      *
-     * @param[in]  input  The input tensor to convert. Data type supported: QS8/U8/U16/S16/QS16/F32.
-     * @param[out] output The output tensor. Data type supported: QS8/U8/U16/S16/QS16/U32/S32/F32.
-     * @param[in]  policy Conversion policy.
-     * @param[in]  shift  Value for down/up conversions. Must be 0 <= shift < 8.
-     *                    It is not used on fixed point conversion.
+     * @param[in, out] input  The input tensor to convert (Written in case of in-place computation). Data types supported: U8/QS8/U16/S16/QS16/F32.
+     * @param[out]     output The output tensor. Can be null in case of in-place computation. Data types supported: U8/QS8/U16/S16/QS16/U32/S32/F32.
+     * @param[in]      policy Conversion policy.
+     * @param[in]      shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
+     *                        In case of fixed point position conversion, it specifies the new fixed point position if the operation is in-place.
      */
-    void configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift);
+    void configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
 };
 }
 #endif /*__ARM_COMPUTE_NEDEPTHCONVERT_H__*/
diff --git a/src/core/NEON/kernels/NEDepthConvertKernel.cpp b/src/core/NEON/kernels/NEDepthConvertKernel.cpp
index 3c1a94d..f720370 100644
--- a/src/core/NEON/kernels/NEDepthConvertKernel.cpp
+++ b/src/core/NEON/kernels/NEDepthConvertKernel.cpp
@@ -40,24 +40,53 @@
 } // namespace arm_compute
 
 NEDepthConvertKernel::NEDepthConvertKernel()
-    : _policy(), _shift(0)
+    : _input(nullptr), _output(nullptr), _policy(), _shift(0), _fixed_point_position_input(0), _fixed_point_position_output(0)
 {
 }
 
-void NEDepthConvertKernel::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
+void NEDepthConvertKernel::configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::U16, DataType::QS16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::U16, DataType::QS16, DataType::U32, DataType::S32, DataType::F32);
-    ARM_COMPUTE_ERROR_ON(shift >= 8);
-    ARM_COMPUTE_ERROR_ON(input == output);
-    ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == output->info()->data_type(), "Input and output data_types must be different");
+
+    _input  = input;
+    _output = input; // Default to in-place computation; overridden below if an output tensor is provided
+    _policy = policy;
+    _shift  = shift;
+
+    if(output != nullptr)
+    {
+        // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
+        set_shape_if_empty(*output->info(), input->info()->tensor_shape());
+
+        ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8, DataType::QS8, DataType::S16, DataType::U16, DataType::QS16, DataType::U32, DataType::S32, DataType::F32);
+        ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+
+        // Set output
+        _output = output;
+    }
+
+    // Set initial fixed point position of input and output
+    _fixed_point_position_input  = input->info()->fixed_point_position();
+    _fixed_point_position_output = _output->info()->fixed_point_position();
+
+    // Set the fixed point position to the output tensor if needed
+    if(is_data_type_fixed_point(input->info()->data_type()) && is_data_type_fixed_point(_output->info()->data_type()))
+    {
+        // If in-place set the fixed point position of the output tensor to be equal to shift
+        _fixed_point_position_output = (_input == _output) ? static_cast<int>(_shift) : _fixed_point_position_output;
+        // Set fixed point position to output tensor
+        _output->info()->set_fixed_point_position(_fixed_point_position_output);
+    }
+
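+    // The shift < 8 limit only applies to integer up/down conversions; for fixed point types shift carries the new fixed point position
+    // Passing the same tensor as input and output is only valid when the element sizes match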
+    ARM_COMPUTE_ERROR_ON(shift >= 8 && (!is_data_type_fixed_point(input->info()->data_type()) && !is_data_type_fixed_point(output->info()->data_type())));
+    ARM_COMPUTE_ERROR_ON(input == output && (data_size_from_type(input->info()->data_type()) != data_size_from_type(output->info()->data_type())));
 
     ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::U8 && (output->info()->data_type() != DataType::S16 && output->info()->data_type() != DataType::U16
                                                                             && output->info()->data_type() != DataType::S32),
                              "Only data_types supported [in] U8 -> [out] U16, S16, S32");
 
-    ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::QS8 && output->info()->data_type() != DataType::F32,
-                             "Only data_types supported [in] QS8 ->  [out] F32");
+    ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::QS8 && (_output->info()->data_type() != DataType::QS8 && _output->info()->data_type() != DataType::F32),
+                             "Only data_types supported [in] QS8 ->  [out] QS8, F32");
 
     ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::U16 && (output->info()->data_type() != DataType::U8 && output->info()->data_type() != DataType::U32),
                              "Only data_types supported [in] U16 ->  [out] U8, U32");
@@ -65,28 +94,36 @@
     ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::S16 && (output->info()->data_type() != DataType::U8 && output->info()->data_type() != DataType::S32),
                              "Only data_types supported [in] S16 ->  [out] U8, S32");
 
-    ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::QS16 && output->info()->data_type() != DataType::F32,
-                             "Only data_types supported [in] QS16 ->  [out] F32");
+    ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::QS16 && (_output->info()->data_type() != DataType::QS16 && _output->info()->data_type() != DataType::F32),
+                             "Only data_types supported [in] QS16 ->  [out] QS16, F32");
 
     ARM_COMPUTE_ERROR_ON_MSG(input->info()->data_type() == DataType::F32 && (output->info()->data_type() != DataType::QS8 && output->info()->data_type() != DataType::QS16),
                              "Only data_types supported [in] F32 ->  [out] QS8, QS16");
 
-    // Auto initialize output shape if not initialized (We can only auto-configure the shape, datatype must be given)
-    set_shape_if_empty(*output->info(), input->info()->tensor_shape());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-
-    _policy = policy;
-    _shift  = shift;
-
     constexpr unsigned int num_elems_processed_per_iteration = 16;
-    INESimpleKernel::configure(input, output, num_elems_processed_per_iteration);
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+
+    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
+    if(output != nullptr)
+    {
+        AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
+        update_window_and_padding(win, input_access, output_access);
+        output_access.set_valid_region(win, input->info()->valid_region());
+    }
+    else
+    {
+        // In-place computation
+        update_window_and_padding(win, input_access);
+    }
+    ICPPKernel::configure(win);
 }
 
 void NEDepthConvertKernel::run(const Window &window)
 {
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INESimpleKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
     ARM_COMPUTE_ERROR_ON(nullptr == _input);
     ARM_COMPUTE_ERROR_ON(nullptr == _output);
-    ARM_COMPUTE_ERROR_ON(_input == _output);
@@ -94,37 +131,10 @@
     Iterator input(_input, window);
     Iterator output(_output, window);
 
+    bool in_place = (_input == _output);
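+    // In-place computation is only reached for QS8 -> QS8 and QS16 -> QS16 fixed point position conversions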
+
     switch(_input->info()->data_type())
     {
-        case DataType::QS8:
-        {
-            const int fixed_point_position = _input->info()->fixed_point_position();
-
-            switch(_output->info()->data_type())
-            {
-                case DataType::F32:
-                {
-                    /* Up-conversion QS8 -> F32 */
-                    execute_window_loop(window, [&](const Coordinates & id)
-                    {
-                        const int8x16_t texels_s8 = vld1q_s8(reinterpret_cast<const int8_t *>(input.ptr()));
-
-                        float32x4x2_t texels_low  = vcvt_f32_qs8(vget_low_s8(texels_s8), fixed_point_position);
-                        float32x4x2_t texels_high = vcvt_f32_qs8(vget_high_s8(texels_s8), fixed_point_position);
-
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), texels_low.val[0]);
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, texels_low.val[1]);
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, texels_high.val[0]);
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, texels_high.val[1]);
-                    },
-                    input, output);
-                    break;
-                }
-                default:
-                    ARM_COMPUTE_ERROR("Output data type not supported");
-            }
-            break;
-        }
         case DataType::U8:
         {
             const int16x8_t b = vdupq_n_s16(_shift);
@@ -201,6 +211,49 @@
             }
             break;
         }
+        case DataType::QS8:
+        {
+            switch(_output->info()->data_type())
+            {
+                case DataType::QS8:
+                {
+                    const int relative_shift = _fixed_point_position_output - _fixed_point_position_input;
+                    /* Fixed point position conversion QS8 -> QS8 */
+                    if(relative_shift != 0 || !in_place)
+                    {
+                        const auto relative_shift_vec = vdupq_n_qs8(relative_shift);
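+                        // vqrshlq_s8 performs a saturating left shift for a positive relative shift and a rounding right shift for a negative one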
+                        execute_window_loop(window, [&](const Coordinates & id)
+                        {
+                            const qint8x16_t texels_qs8 = vld1q_qs8(reinterpret_cast<const qint8_t *>(input.ptr()));
+                            vst1q_qs8(reinterpret_cast<qint8_t *>(output.ptr()), vqrshlq_s8(texels_qs8, relative_shift_vec));
+                        },
+                        input, output);
+                    }
+                    break;
+                }
+                case DataType::F32:
+                {
+                    /* Up-conversion QS8 -> F32 */
+                    execute_window_loop(window, [&](const Coordinates & id)
+                    {
+                        const qint8x16_t texels_qs8 = vld1q_qs8(reinterpret_cast<const qint8_t *>(input.ptr()));
+
+                        float32x4x2_t texels_low  = vcvt_f32_qs8(vget_low_s8(texels_qs8), _fixed_point_position_input);
+                        float32x4x2_t texels_high = vcvt_f32_qs8(vget_high_s8(texels_qs8), _fixed_point_position_input);
+
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), texels_low.val[0]);
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, texels_low.val[1]);
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, texels_high.val[0]);
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, texels_high.val[1]);
+                    },
+                    input, output);
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("Output data type not supported");
+            }
+            break;
+        }
         case DataType::S16:
         {
             switch(_output->info()->data_type())
@@ -356,16 +409,37 @@
         }
         case DataType::QS16:
         {
-            const int fixed_point_position = _input->info()->fixed_point_position();
-
             switch(_output->info()->data_type())
             {
+                case DataType::QS16:
+                {
+                    const int relative_shift = _fixed_point_position_output - _fixed_point_position_input;
+                    /* Fixed point position conversion QS16 -> QS16 */
+                    if(relative_shift != 0 || !in_place)
+                    {
+                        const auto relative_shift_vec = vdupq_n_qs16(relative_shift);
+                        execute_window_loop(window, [&](const Coordinates & id)
+                        {
+                            const qint16x8x2_t texels_qs16 =
+                            {
+                                {
+                                    vld1q_qs16(reinterpret_cast<qint16_t *>(input.ptr())),
+                                    vld1q_qs16(reinterpret_cast<qint16_t *>(input.ptr()) + 8)
+                                }
+                            };
+                            vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()), vqrshlq_s16(texels_qs16.val[0], relative_shift_vec));
+                            vst1q_qs16(reinterpret_cast<qint16_t *>(output.ptr()) + 8, vqrshlq_s16(texels_qs16.val[1], relative_shift_vec));
+                        },
+                        input, output);
+                    }
+                    break;
+                }
                 case DataType::F32:
                 {
                     /* Up-conversion QS16 -> F32 */
                     execute_window_loop(window, [&](const Coordinates & id)
                     {
-                        const int16x8x2_t texels =
+                        const int16x8x2_t texels_qs16 =
                         {
                             {
                                 vld1q_s16(reinterpret_cast<qint16_t *>(input.ptr())),
@@ -373,10 +447,10 @@
                             }
                         };
 
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), vcvt_f32_qs16(vget_low_s16(texels.val[0]), fixed_point_position));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, vcvt_f32_qs16(vget_high_s16(texels.val[0]), fixed_point_position));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, vcvt_f32_qs16(vget_low_s16(texels.val[1]), fixed_point_position));
-                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, vcvt_f32_qs16(vget_high_s16(texels.val[1]), fixed_point_position));
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()), vcvt_f32_qs16(vget_low_s16(texels_qs16.val[0]), _fixed_point_position_input));
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 4, vcvt_f32_qs16(vget_high_s16(texels_qs16.val[0]), _fixed_point_position_input));
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 8, vcvt_f32_qs16(vget_low_s16(texels_qs16.val[1]), _fixed_point_position_input));
+                        vst1q_f32(reinterpret_cast<float *>(output.ptr()) + 12, vcvt_f32_qs16(vget_high_s16(texels_qs16.val[1]), _fixed_point_position_input));
                     },
                     input, output);
                     break;
@@ -392,7 +466,6 @@
             {
                 case DataType::QS8:
                 {
-                    const int fixed_point_position = _output->info()->fixed_point_position();
                     /* Down-conversion F32 -> QS8 */
                     execute_window_loop(window, [&](const Coordinates & id)
                     {
@@ -406,7 +479,7 @@
                             }
                         };
 
-                        const qint8x16_t texels_s8 = vqcvtq_qs8_f32(texels_f32, fixed_point_position);
+                        const qint8x16_t texels_s8 = vqcvtq_qs8_f32(texels_f32, _fixed_point_position_output);
 
                         vst1q_s8(reinterpret_cast<int8_t *>(output.ptr()), texels_s8);
                     },
@@ -415,7 +488,6 @@
                 }
                 case DataType::QS16:
                 {
-                    const int fixed_point_position = _output->info()->fixed_point_position();
                     /* Down-conversion F32 -> QS16 */
                     execute_window_loop(window, [&](const Coordinates & id)
                     {
@@ -434,8 +506,8 @@
                             }
                         };
 
-                        vst1q_s16(reinterpret_cast<qint16_t *>(output.ptr()), vqcvtq_qs16_f32(texels_f32_1, fixed_point_position));
-                        vst1q_s16(reinterpret_cast<qint16_t *>(output.ptr()) + 8, vqcvtq_qs16_f32(texels_f32_2, fixed_point_position));
+                        vst1q_s16(reinterpret_cast<qint16_t *>(output.ptr()), vqcvtq_qs16_f32(texels_f32_1, _fixed_point_position_output));
+                        vst1q_s16(reinterpret_cast<qint16_t *>(output.ptr()) + 8, vqcvtq_qs16_f32(texels_f32_2, _fixed_point_position_output));
                     },
                     input, output);
                     break;
diff --git a/src/runtime/NEON/functions/NEDepthConvert.cpp b/src/runtime/NEON/functions/NEDepthConvert.cpp
index 24b5149..37857b6 100644
--- a/src/runtime/NEON/functions/NEDepthConvert.cpp
+++ b/src/runtime/NEON/functions/NEDepthConvert.cpp
@@ -30,7 +30,7 @@
 
 using namespace arm_compute;
 
-void NEDepthConvert::configure(const ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
+void NEDepthConvert::configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift)
 {
     auto k = arm_compute::support::cpp14::make_unique<NEDepthConvertKernel>();
     k->configure(input, output, policy, shift);
diff --git a/tests/validation/CL/DepthConvert.cpp b/tests/validation/CL/DepthConvert.cpp
index 2655f00..547820e 100644
--- a/tests/validation/CL/DepthConvert.cpp
+++ b/tests/validation/CL/DepthConvert.cpp
@@ -469,7 +469,7 @@
     CLTensor dst = compute_depth_convert(shape, DataType::F32, dt, policy, 0, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, dt, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, dt, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(CLAccessor(dst), ref_dst);
@@ -484,7 +484,7 @@
     CLTensor dst = compute_depth_convert(shape, DataType::F32, dt, policy, 0, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, dt, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, dt, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(CLAccessor(dst), ref_dst);
diff --git a/tests/validation/FixedPoint.h b/tests/validation/FixedPoint.h
index ab6f14a..12ffcdf 100644
--- a/tests/validation/FixedPoint.h
+++ b/tests/validation/FixedPoint.h
@@ -244,15 +244,20 @@
     {
         assert(p > 0 && p < std::numeric_limits<T>::digits);
 
+        using promoted_T = typename traits::promote<T>::type;
+        promoted_T val   = _value;
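+        // Work in the promoted (wider) type so the left shift below cannot overflow; the result is saturated back to T afterwards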
         if(p > _fixed_point_position)
         {
-            _value <<= (p - _fixed_point_position);
+            val <<= (p - _fixed_point_position);
         }
         else if(p < _fixed_point_position)
         {
-            _value >>= (_fixed_point_position - p);
+            uint8_t pbar = _fixed_point_position - p;
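+            // Round to nearest by adding half of the discarded fraction (1 << (pbar - 1)) before the right shift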
+            val += (pbar != 0) ? (1 << (pbar - 1)) : 0;
+            val >>= pbar;
         }
 
+        _value                = detail::constant_expr<T>::saturate_cast(val);
         _fixed_point_position = p;
     }
 
diff --git a/tests/validation/NEON/DepthConvert.cpp b/tests/validation/NEON/DepthConvert.cpp
index 65d3ab1..8a30c74 100644
--- a/tests/validation/NEON/DepthConvert.cpp
+++ b/tests/validation/NEON/DepthConvert.cpp
@@ -51,20 +51,22 @@
 {
 /** Compute Neon depth convert function.
  *
- * @param[in] shape                Shape of the input and output tensors.
- * @param[in] dt_in                Data type of input tensor.
- * @param[in] dt_out               Data type of the output tensor.
- * @param[in] policy               Conversion policy.
- * @param[in] shift                Value for down/up conversions. Must be 0 <= shift < 8.
- * @param[in] fixed_point_position Fixed point position.
+ * @param[in] shape                    Shape of the input and output tensors.
+ * @param[in] dt_in                    Data type of input tensor.
+ * @param[in] dt_out                   Data type of the output tensor.
+ * @param[in] policy                   Conversion policy.
+ * @param[in] shift                    Value for down/up conversions. Must be 0 <= shift < 8.
+ * @param[in] fixed_point_position_in  (Optional) Fixed point position for the input tensor.
+ * @param[in] fixed_point_position_out (Optional) Fixed point position for the output tensor.
  *
  * @return Computed output tensor.
  */
-Tensor compute_depth_convert(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+Tensor compute_depth_convert(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy,
+                             uint32_t shift, uint32_t fixed_point_position_in = 0, uint32_t fixed_point_position_out = 0)
 {
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, dt_in, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, dt_out, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, dt_in, 1, fixed_point_position_in);
+    Tensor dst = create_tensor<Tensor>(shape, dt_out, 1, fixed_point_position_out);
 
     // Create and configure function
     NEDepthConvert depth_convert;
@@ -87,20 +89,22 @@
 }
 /** Configure and validate region/padding function.
  *
- * @param[in] shape                Shape of the input and output tensors.
- * @param[in] dt_in                Data type of input tensor.
- * @param[in] dt_out               Data type of the output tensor.
- * @param[in] policy               Conversion policy.
- * @param[in] shift                Value for down/up conversions. Must be 0 <= shift < 8.
- * @param[in] fixed_point_position Fixed point position.
+ * @param[in] shape                    Shape of the input and output tensors.
+ * @param[in] dt_in                    Data type of input tensor.
+ * @param[in] dt_out                   Data type of the output tensor.
+ * @param[in] policy                   Conversion policy.
+ * @param[in] shift                    Value for down/up conversions. Must be 0 <= shift < 8.
+ * @param[in] fixed_point_position_in  (Optional) Fixed point position for the input tensor.
+ * @param[in] fixed_point_position_out (Optional) Fixed point position for the output tensor.
  *
  */
 
-void compute_configure_validate(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+void compute_configure_validate(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy,
+                                uint32_t shift, uint32_t fixed_point_position_in = 0, uint32_t fixed_point_position_out = 0)
 {
     // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, dt_in, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, dt_out, 1, fixed_point_position);
+    Tensor src = create_tensor<Tensor>(shape, dt_in, 1, fixed_point_position_in);
+    Tensor dst = create_tensor<Tensor>(shape, dt_out, 1, fixed_point_position_out);
 
     BOOST_TEST(src.info()->is_resizable());
     BOOST_TEST(dst.info()->is_resizable());
@@ -125,6 +129,32 @@
 BOOST_AUTO_TEST_SUITE(NEON)
 BOOST_AUTO_TEST_SUITE(DepthConvert)
 
+BOOST_AUTO_TEST_SUITE(QS8_to_QS8)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * boost::unit_test::data::make({ ConvertPolicy::SATURATE })
+                     * (boost::unit_test::data::make({ 1, 3, 5, 6 }) ^ boost::unit_test::data::make({ 6, 5, 1, 3 })),
+                     shape, policy, fixed_point_position_in, fixed_point_position_out)
+{
+    // Compute configure and validate region/padding
+    compute_configure_validate(shape, DataType::QS8, DataType::QS8, policy, 0, fixed_point_position_in, fixed_point_position_out);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::make({ ConvertPolicy::SATURATE })
+                     * (boost::unit_test::data::make({ 1, 3, 5, 6 }) ^ boost::unit_test::data::make({ 6, 5, 1, 3 })),
+                     shape, policy, fixed_point_position_in, fixed_point_position_out)
+{
+    // Compute function
+    Tensor dst = compute_depth_convert(shape, DataType::QS8, DataType::QS8, policy, 0, fixed_point_position_in, fixed_point_position_out);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS8, DataType::QS8, policy, 0, fixed_point_position_in, fixed_point_position_out);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
 BOOST_AUTO_TEST_SUITE(QS8_to_F32)
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
 BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * boost::unit_test::data::make({ ConvertPolicy::SATURATE })
@@ -132,7 +162,7 @@
                      shape, policy, fixed_point_position)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position);
+    compute_configure_validate(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -141,10 +171,10 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -156,15 +186,14 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS8, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
 }
-
 BOOST_AUTO_TEST_SUITE_END()
 
 BOOST_AUTO_TEST_SUITE(F32_to_QS8)
@@ -174,7 +203,7 @@
                      shape, policy, fixed_point_position)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position);
+    compute_configure_validate(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position, fixed_point_position);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -183,10 +212,10 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -198,10 +227,36 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS8, policy, 0, fixed_point_position, fixed_point_position);
+
+    // Validate output
+    validate(NEAccessor(dst), ref_dst);
+}
+BOOST_AUTO_TEST_SUITE_END()
+
+BOOST_AUTO_TEST_SUITE(QS16_to_QS16)
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
+BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * boost::unit_test::data::make({ ConvertPolicy::SATURATE })
+                     * (boost::unit_test::data::make({ 3, 6, 7, 13, 14 }) ^ boost::unit_test::data::make({ 5, 10, 14, 4, 7 })),
+                     shape, policy, fixed_point_position_in, fixed_point_position_out)
+{
+    // Compute configure and validate region/padding
+    compute_configure_validate(shape, DataType::QS16, DataType::QS16, policy, 0, fixed_point_position_in, fixed_point_position_out);
+}
+
+BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
+BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::make({ ConvertPolicy::SATURATE })
+                     * (boost::unit_test::data::make({ 3, 6, 7, 13, 14 }) ^ boost::unit_test::data::make({ 5, 10, 14, 4, 7 })),
+                     shape, policy, fixed_point_position_in, fixed_point_position_out)
+{
+    // Compute function
+    Tensor dst = compute_depth_convert(shape, DataType::QS16, DataType::QS16, policy, 0, fixed_point_position_in, fixed_point_position_out);
+
+    // Compute reference
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS16, DataType::QS16, policy, 0, fixed_point_position_in, fixed_point_position_out);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -215,7 +270,7 @@
                      shape, policy, fixed_point_position)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position);
+    compute_configure_validate(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -224,10 +279,10 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -239,10 +294,10 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::QS16, DataType::F32, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -257,7 +312,7 @@
                      shape, policy, fixed_point_position)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position);
+    compute_configure_validate(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position, fixed_point_position);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -266,10 +321,10 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -281,10 +336,10 @@
                      shape, policy, fixed_point_position)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position);
+    Tensor dst = compute_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position, fixed_point_position);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::F32, DataType::QS16, policy, 0, fixed_point_position, fixed_point_position);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -308,10 +363,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::U16, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::U16, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::U16, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::U16, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -322,10 +377,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::U16, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::U16, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::U16, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::U16, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -339,7 +394,7 @@
                      shape, policy, shift)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::U8, DataType::S16, policy, shift, 0);
+    compute_configure_validate(shape, DataType::U8, DataType::S16, policy, shift);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -348,10 +403,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S16, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S16, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S16, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S16, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -363,10 +418,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S16, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S16, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S16, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S16, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -380,7 +435,7 @@
                      shape, policy, shift)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::U8, DataType::S32, policy, shift, 0);
+    compute_configure_validate(shape, DataType::U8, DataType::S32, policy, shift);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -389,10 +444,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S32, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S32, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S32, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S32, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -404,10 +459,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S32, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U8, DataType::S32, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S32, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U8, DataType::S32, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -421,7 +476,7 @@
                      shape, policy, shift)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::U16, DataType::U8, policy, shift, 0);
+    compute_configure_validate(shape, DataType::U16, DataType::U8, policy, shift);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -430,10 +485,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U8, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U8, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U8, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U8, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -445,10 +500,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U8, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U8, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U8, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U8, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -462,7 +517,7 @@
                      shape, policy, shift)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::U16, DataType::U32, policy, shift, 0);
+    compute_configure_validate(shape, DataType::U16, DataType::U32, policy, shift);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -471,10 +526,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U32, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U32, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U32, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U32, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -486,10 +541,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U32, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::U16, DataType::U32, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U32, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::U16, DataType::U32, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -503,7 +558,7 @@
                      shape, policy, shift)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::S16, DataType::U8, policy, shift, 0);
+    compute_configure_validate(shape, DataType::S16, DataType::U8, policy, shift);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -512,10 +567,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::U8, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::U8, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::U8, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::U8, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -527,10 +582,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::U8, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::U8, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::U8, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::U8, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -544,7 +599,7 @@
                      shape, policy, shift)
 {
     // Compute configure and validate region/padding
-    compute_configure_validate(shape, DataType::S16, DataType::S32, policy, shift, 0);
+    compute_configure_validate(shape, DataType::S16, DataType::S32, policy, shift);
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
@@ -553,10 +608,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::S32, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::S32, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::S32, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::S32, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
@@ -568,10 +623,10 @@
                      shape, policy, shift)
 {
     // Compute function
-    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::S32, policy, shift, 0);
+    Tensor dst = compute_depth_convert(shape, DataType::S16, DataType::S32, policy, shift);
 
     // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::S32, policy, shift, 0);
+    RawTensor ref_dst = Reference::compute_reference_depth_convert(shape, DataType::S16, DataType::S32, policy, shift);
 
     // Validate output
     validate(NEAccessor(dst), ref_dst);
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index acf010e..4b4919f 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -283,10 +283,11 @@
     return ref_dst;
 }
 
-RawTensor Reference::compute_reference_depth_convert(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position)
+RawTensor Reference::compute_reference_depth_convert(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy,
+                                                     uint32_t shift, uint32_t fixed_point_position_in, uint32_t fixed_point_position_out)
 {
-    RawTensor ref_src = library->get(shape, dt_in, 1, fixed_point_position);
-    RawTensor ref_dst = library->get(shape, dt_out, 1, fixed_point_position);
+    RawTensor ref_src = library->get(shape, dt_in, 1, fixed_point_position_in);
+    RawTensor ref_dst = library->get(shape, dt_out, 1, fixed_point_position_out);
 
     // Fill reference
     library->fill_tensor_uniform(ref_src, 0);
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index 3ad9814..a3ae3b6 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -168,16 +168,18 @@
     static RawTensor compute_reference_box3x3(const TensorShape &shape, BorderMode border_mode, uint8_t constant_border_value);
     /** Compute reference depth convert.
      *
-     * @param[in] shape                Shape of the input and output tensors.
-     * @param[in] dt_in                Data type of input tensor.
-     * @param[in] dt_out               Data type of the output tensor.
-     * @param[in] policy               Overflow policy of the operation.
-     * @param[in] shift                Value for down/up conversions. Must be 0 <= shift < 8.
-     * @param[in] fixed_point_position Fixed point position.
+     * @param[in] shape                    Shape of the input and output tensors.
+     * @param[in] dt_in                    Data type of input tensor.
+     * @param[in] dt_out                   Data type of the output tensor.
+     * @param[in] policy                   Overflow policy of the operation.
+     * @param[in] shift                    Value for down/up conversions. Must be 0 <= shift < 8.
+     * @param[in] fixed_point_position_in  (Optional) Fixed point position for the input tensor.
+     * @param[in] fixed_point_position_out (Optional) Fixed point position for the output tensor.
      *
      * @return Computed raw tensor.
      */
-    static RawTensor compute_reference_depth_convert(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, uint32_t fixed_point_position);
+    static RawTensor compute_reference_depth_convert(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy,
+                                                     uint32_t shift, uint32_t fixed_point_position_in = 0, uint32_t fixed_point_position_out = 0);
     /** Compute reference gaussian3x3 filter.
      *
      * @param[in] shape                 Shape of the input and output tensors.
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 9e6f5cf..882c9e0 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -519,7 +519,7 @@
 }
 
 // Depth conversion
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&is_floating_point<T2>::value, int >::type = 0 >
+template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_floating_point<T2>::value, int >::type = 0 >
 void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
 {
     using namespace fixed_point_arithmetic;
@@ -531,7 +531,7 @@
     }
 }
 
-template < typename T1, typename T2, typename std::enable_if < is_floating_point<T1>::value &&std::is_integral<T2>::value, int >::type = 0 >
+template < typename T1, typename T2, typename std::enable_if < std::is_floating_point<T1>::value &&std::is_integral<T2>::value, int >::type = 0 >
 void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
 {
     using namespace fixed_point_arithmetic;
@@ -543,7 +543,7 @@
     }
 }
 
-template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value, int >::type = 0 >
+template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&!std::is_same<T1, T2>::value, int >::type = 0 >
 void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
 {
     // Up-casting
@@ -565,6 +565,26 @@
     }
 }
 
+template < typename T1, typename T2, typename std::enable_if < std::is_integral<T1>::value &&std::is_integral<T2>::value &&std::is_same<T1, T2>::value, int >::type = 0 >
+void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
+{
+    using namespace fixed_point_arithmetic;
+    bool is_in_place = (&in == &out);
+
+    const int fixed_point_position_in  = in.fixed_point_position();
+    const int fixed_point_position_out = (is_in_place) ? static_cast<int>(shift) : out.fixed_point_position();
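+    // For in-place conversion the new fixed point position is carried by the shift parameter, mirroring NEDepthConvertKernel::configure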
+
+    if(!is_in_place || (fixed_point_position_in != fixed_point_position_out))
+    {
+        for(int i = 0; i < in.num_elements(); ++i)
+        {
+            auto x = fixed_point<T2>(in[i], fixed_point_position_in, true);
+            x.rescale(fixed_point_position_out);
+            out[i] = x.raw();
+        }
+    }
+}
+
 template < typename T1, typename T2, typename std::enable_if < is_floating_point<T1>::value &&is_floating_point<T2>::value, int >::type = 0 >
 void depth_convert(const Tensor<T1> &in, Tensor<T2> &out, ConvertPolicy policy, uint32_t shift)
 {