COMPMID-2920: NEInstanceNormalization fails on NHWC validations

Improved TensorInfo to accept DataLayout, useful for testing the validate functions
Removing nightlies tests
Moving all vpadds instructions in add.h

Change-Id: I96290a6f26272eae865dba48bbc3c6aee4bc0214
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2287
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
index 9745d26..c341197 100644
--- a/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h
@@ -53,6 +53,7 @@
     /** Set the input and output tensors.
      *
      * @param[in, out] input   Source tensor. Data types supported: F16/F32. Data layout supported: NCHW
+     *                         In case of @p output tensor = nullptr this tensor will store the result of the normalization.
      * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
      * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
      * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
@@ -62,8 +63,7 @@
 
     /** Static function to check if given info will lead to a valid configuration of @ref NEInstanceNormalizationLayer.
      *
-     * @param[in] input   Source tensor info. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
-     *                    Data types supported: F16/F32. Data layout supported: NCHW
+     * @param[in] input   Source tensor info. Data types supported: F16/F32. Data layout supported: NCHW
      * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
      * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
      * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/add.h b/arm_compute/core/NEON/wrapper/intrinsics/add.h
index 1839170..f082346 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/add.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/add.h
@@ -176,6 +176,26 @@
 VPADDL_IMPL(uint64x2_t, uint32x4_t, vpaddlq, u32)
 VPADDL_IMPL(int64x2_t, int32x4_t, vpaddlq, s32)
 #undef VPADDL_IMPL
+
+// VPADD: Add pairwise
+#define VPADD_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vpadd(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
+VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
+VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
+VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
+VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
+VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
+VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+
+#undef VPADD_IMPL
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_WRAPPER_ADD_H__ */
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
index 6eae1cf..d9b8297 100644
--- a/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
+++ b/arm_compute/core/NEON/wrapper/intrinsics/intrinsics.h
@@ -53,7 +53,6 @@
 #include "arm_compute/core/NEON/wrapper/intrinsics/neg.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/not.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/orr.h"
-#include "arm_compute/core/NEON/wrapper/intrinsics/padd.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pmax.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pmin.h"
 #include "arm_compute/core/NEON/wrapper/intrinsics/pow.h"
diff --git a/arm_compute/core/NEON/wrapper/intrinsics/padd.h b/arm_compute/core/NEON/wrapper/intrinsics/padd.h
deleted file mode 100644
index 5ee2173..0000000
--- a/arm_compute/core/NEON/wrapper/intrinsics/padd.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_WRAPPER_PADD_H__
-#define __ARM_COMPUTE_WRAPPER_PADD_H__
-
-#include <arm_neon.h>
-
-namespace arm_compute
-{
-namespace wrapper
-{
-#define VPADD_IMPL(stype, vtype, prefix, postfix)      \
-    inline vtype vpadd(const vtype &a, const vtype &b) \
-    {                                                  \
-        return prefix##_##postfix(a, b);               \
-    }
-
-VPADD_IMPL(uint8x8_t, uint8x8_t, vpadd, u8)
-VPADD_IMPL(int8x8_t, int8x8_t, vpadd, s8)
-VPADD_IMPL(uint16x4_t, uint16x4_t, vpadd, u16)
-VPADD_IMPL(int16x4_t, int16x4_t, vpadd, s16)
-VPADD_IMPL(uint32x2_t, uint32x2_t, vpadd, u32)
-VPADD_IMPL(int32x2_t, int32x2_t, vpadd, s32)
-VPADD_IMPL(float32x2_t, float32x2_t, vpadd, f32)
-#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-VPADD_IMPL(float16x4_t, float16x4_t, vpadd, f16)
-#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-
-#undef VPADD_IMPL
-} // namespace wrapper
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_WRAPPER_PADD_H__ */