COMPMID-2252 NECast.
Change-Id: I7532aea6827a325eb8457132d4787ac527e93cd4
Signed-off-by: Usama Arif <usama.arif@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1149
Reviewed-by: Pablo Marquez <pablo.tello@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
index 16b8e42..c900e08 100644
--- a/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthConvertLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -30,7 +30,9 @@
{
class ITensor;
-/** Depth conversion kernel */
+/** Depth conversion kernel
+ * This function ignores the scale and zeroPoint of quantized tensors, i.e. QASYMM8 input is treated as uint8 values.
+ */
class NEDepthConvertLayerKernel : public INEKernel
{
public:
@@ -52,12 +54,13 @@
*
* Valid conversions Input -> Output :
*
- * - QASYMM8 -> F16, F32
- * - U8 -> U16, S16, S32
+ * - QASYMM8 -> U16, S16, S32, F32, F16
+ * - U8 -> U16, S16, S32, F32, F16
* - U16 -> U8, U32
* - S16 -> U8, S32
- * - F16 -> QASYMM8, F32
- * - F32 -> QASYMM8, F16
+ * - F16 -> QASYMM8, F32, S32, U8
+ * - S32 -> QASYMM8, F16, F32, U8
+ * - F32 -> QASYMM8, F16, S32, U8
*
* @param[in] input The input tensor to convert. Data types supported: QASYMM8/U8/U16/S16/F16/F32.
* @param[out] output The output tensor. Data types supported: QASYMM8/U8/U16/S16/U32/S32/F16/F32.