COMPMID-617: Add validation functions.

Added validation routines to the following CL kernels and runtime functions:
- CLActivationLayer
- CLBatchNormalizationLayer
- CLArithmeticAddition
- CLArithmeticSubtraction
- CLPixelWiseMultiplication

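The new validate() overloads mirror the corresponding configure() calls but take
ITensorInfo pointers, so a configuration can be checked before any CL tensor is
allocated or a kernel is configured. The snippet below is a minimal usage sketch,
not part of this patch; the helper name, shapes and data types are illustrative only:

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

    using namespace arm_compute;

    // Illustrative helper: check an activation configuration up front.
    void check_activation_config()
    {
        const TensorInfo          input(TensorShape(32U, 32U, 16U), 1, DataType::F32);
        const TensorInfo          output(TensorShape(32U, 32U, 16U), 1, DataType::F32);
        const ActivationLayerInfo act_info(ActivationLayerInfo::ActivationFunction::RELU);

        // validate() reports problems through the returned Error instead of
        // asserting inside configure().
        const Error err = CLActivationLayer::validate(&input, &output, act_info);

        // With asserts enabled this throws if err holds an error and is a no-op
        // otherwise (see the ARM_COMPUTE_ERROR_THROW_ON rename in Error.h below).
        ARM_COMPUTE_ERROR_THROW_ON(err);
    }
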
Change-Id: I0f3a03154f9e392279f715af656683cd0ad4cef5
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94595
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
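
For reference, a function-level validate() can simply forward to the kernel's check
using the ARM_COMPUTE_RETURN_* macros touched in Validate.h. The function below is a
hypothetical sketch of that composition, not the implementation shipped in this patch:

    #include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/ITensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/Validate.h"

    using namespace arm_compute;

    // Hypothetical composition: each RETURN_* macro bails out early with a
    // populated Error; an empty Error{} signals a valid configuration.
    Error validate_activation(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
        ARM_COMPUTE_RETURN_ON_ERROR(CLActivationLayerKernel::validate(input, output, act_info));
        return Error{};
    }
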
diff --git a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
index dab133f..30bf6fb 100644
--- a/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLActivationLayerKernel.h
@@ -56,6 +56,16 @@
      * @param[in]      act_info Activation layer information.
      */
     void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayerKernel
+     *
+     * @param[in] input    Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
+     *                     of the activation function. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output   Destination tensor info. Data type supported: same as @p input
+     * @param[in] act_info Activation layer information.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
index 0895fe3..aa4b9d6 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
@@ -59,6 +59,16 @@
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAdditionKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
index d7755d5..13136d9 100644
--- a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
+++ b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
@@ -61,6 +61,16 @@
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtractionKernel
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index 26825ef..a244321 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -63,6 +63,24 @@
      * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      */
     void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
+     *
+     * @param[in] input   Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
+     *                    3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                    The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output  Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @param[in] mean    Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] var     Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] gamma   Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] beta    Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] epsilon Small value to avoid division with zero.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input, const ITensorInfo *output,
+                          const ITensorInfo *mean, const ITensorInfo *var,
+                          const ITensorInfo *beta, const ITensorInfo *gamma,
+                          float epsilon);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
index 309a202..ed876df 100644
--- a/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h
@@ -59,6 +59,20 @@
      */
     void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                    ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplicationKernel
+     *
+     * @param[in] input1          An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in] input2          An input tensor info. Data types supported: same as @p input1.
+     * @param[in] output          The output tensor info. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+     * @param[in] scale           Scale to apply after multiplication.
+     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+                          ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
index 29190e2..a9159a4 100644
--- a/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLPoolingLayerKernel.h
@@ -60,11 +60,11 @@
     void configure(const ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayerKernel
      *
-     * @param[in] input     Input's tensor info
-     * @param[in] output    Output's tensor info
+     * @param[in] input     Source tensor info. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output    Destination tensor info. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      *
-     * @return an error
+     * @return an error status
      */
     static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
 
diff --git a/arm_compute/core/Error.h b/arm_compute/core/Error.h
index 6e4aa6a..fa3f9c0 100644
--- a/arm_compute/core/Error.h
+++ b/arm_compute/core/Error.h
@@ -240,7 +240,7 @@
  *
  * @param[in] error Error value to check.
  */
-#define ARM_COMPUTE_ERROR_THROW(error) \
+#define ARM_COMPUTE_ERROR_THROW_ON(error) \
     error.throw_if_error();
 
 /** If the condition is true, the given message is printed and an exception is thrown
@@ -282,7 +282,7 @@
  */
 #define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) (cond) ? throw std::logic_error(msg) : val;
 #else /* ARM_COMPUTE_ASSERTS_ENABLED */
-#define ARM_COMPUTE_ERROR_THROW(error)
+#define ARM_COMPUTE_ERROR_THROW_ON(error)
 #define ARM_COMPUTE_ERROR_ON_MSG(cond, ...)
 #define ARM_COMPUTE_ERROR_ON_LOC_MSG(cond, func, file, line, ...)
 #define ARM_COMPUTE_CONST_ON_ERROR(cond, val, msg) val
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 1e2d262..2ca9f6b 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -142,7 +142,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_NULLPTR(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
@@ -164,7 +164,7 @@
 arm_compute::Error error_on_mismatching_windows(const char *function, const char *file, const int line,
                                                 const Window &full, const Window &win);
 #define ARM_COMPUTE_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
 #define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_WINDOWS(f, w) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_windows(__func__, __FILE__, __LINE__, f, w))
 
@@ -186,7 +186,7 @@
 arm_compute::Error error_on_invalid_subwindow(const char *function, const char *file, const int line,
                                               const Window &full, const Window &sub);
 #define ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(f, s) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
 #define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBWINDOW(f, s) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subwindow(__func__, __FILE__, __LINE__, f, s))
 
@@ -206,7 +206,7 @@
 arm_compute::Error error_on_window_not_collapsable_at_dimension(const char *function, const char *file, const int line,
                                                                 const Window &full, const Window &window, const int dim);
 #define ARM_COMPUTE_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
 #define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_NOT_COLLAPSABLE_AT_DIMENSION(f, w, d) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_window_not_collapsable_at_dimension(__func__, __FILE__, __LINE__, f, w, d))
 
@@ -225,7 +225,7 @@
 arm_compute::Error error_on_coordinates_dimensions_gte(const char *function, const char *file, const int line,
                                                        const Coordinates &pos, unsigned int max_dim);
 #define ARM_COMPUTE_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
 #define ARM_COMPUTE_RETURN_ERROR_ON_COORDINATES_DIMENSIONS_GTE(p, md) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_coordinates_dimensions_gte(__func__, __FILE__, __LINE__, p, md))
 
@@ -244,7 +244,7 @@
 arm_compute::Error error_on_window_dimensions_gte(const char *function, const char *file, const int line,
                                                   const Window &win, unsigned int max_dim);
 #define ARM_COMPUTE_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
 #define ARM_COMPUTE_RETURN_ERROR_ON_WINDOW_DIMENSIONS_GTE(w, md) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_window_dimensions_gte(__func__, __FILE__, __LINE__, w, md))
 
@@ -267,7 +267,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_dimensions(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
@@ -357,7 +357,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_shapes(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
@@ -408,7 +408,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_data_types(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
@@ -432,7 +432,7 @@
     DataType &&first_data_type            = tensor_info_1->data_type();
     const int  first_fixed_point_position = tensor_info_1->fixed_point_position();
 
-    if((first_data_type != DataType::QS8) && (first_data_type != DataType::QS16))
+    if(!is_data_type_fixed_point(first_data_type))
     {
         return arm_compute::Error{};
     }
@@ -473,7 +473,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
@@ -556,7 +556,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(t, ...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_not_in(__func__, __FILE__, __LINE__, t, __VA_ARGS__))
 
@@ -602,7 +602,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c, ...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_data_type_channel_not_in(__func__, __FILE__, __LINE__, t, c, __VA_ARGS__))
 
@@ -618,7 +618,7 @@
 arm_compute::Error error_on_tensor_not_2d(const char *function, const char *file, const int line,
                                           const ITensor *tensor);
 #define ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(t) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
 #define ARM_COMPUTE_RETURN_ERROR_ON_TENSOR_NOT_2D(t) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_tensor_not_2d(__func__, __FILE__, __LINE__, t))
 
@@ -648,7 +648,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN(c, ...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_channel_not_in(__func__, __FILE__, __LINE__, c, __VA_ARGS__))
 
@@ -665,7 +665,7 @@
 arm_compute::Error error_on_channel_not_in_known_format(const char *function, const char *file, const int line,
                                                         Format fmt, Channel cn);
 #define ARM_COMPUTE_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
 #define ARM_COMPUTE_RETURN_ERROR_ON_CHANNEL_NOT_IN_KNOWN_FORMAT(f, c) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_channel_not_in_known_format(__func__, __FILE__, __LINE__, f, c))
 
@@ -687,7 +687,7 @@
 arm_compute::Error error_on_invalid_multi_hog(const char *function, const char *file, const int line,
                                               const IMultiHOG *multi_hog);
 #define ARM_COMPUTE_ERROR_ON_INVALID_MULTI_HOG(m) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
 #define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_MULTI_HOG(m) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_multi_hog(__func__, __FILE__, __LINE__, m))
 
@@ -701,7 +701,7 @@
 arm_compute::Error error_on_unconfigured_kernel(const char *function, const char *file, const int line,
                                                 const IKernel *kernel);
 #define ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(k) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
 #define ARM_COMPUTE_RETURN_ERROR_ON_UNCONFIGURED_KERNEL(k) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_unconfigured_kernel(__func__, __FILE__, __LINE__, k))
 
@@ -719,7 +719,7 @@
 arm_compute::Error error_on_invalid_subtensor(const char *function, const char *file, const int line,
                                               const TensorShape &parent_shape, const Coordinates &coords, const TensorShape &shape);
 #define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
 #define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR(p, c, s) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, p, c, s))
 
@@ -736,7 +736,7 @@
 arm_compute::Error error_on_invalid_subtensor_valid_region(const char *function, const char *file, const int line,
                                                            const ValidRegion &parent_valid_region, const ValidRegion &valid_region);
 #define ARM_COMPUTE_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
 #define ARM_COMPUTE_RETURN_ERROR_ON_INVALID_SUBTENSOR_VALID_REGION(pv, sv) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_invalid_subtensor_valid_region(__func__, __FILE__, __LINE__, pv, sv))
 
@@ -783,7 +783,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT_POSITION(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_mismatching_fixed_point_position(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
@@ -827,7 +827,7 @@
     return arm_compute::Error{};
 }
 #define ARM_COMPUTE_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
-    ARM_COMPUTE_ERROR_THROW(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
 #define ARM_COMPUTE_RETURN_ERROR_ON_VALUE_NOT_REPRESENTABLE_IN_FIXED_POINT(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_value_not_representable_in_fixed_point(__func__, __FILE__, __LINE__, __VA_ARGS__))
 }
diff --git a/arm_compute/runtime/CL/functions/CLActivationLayer.h b/arm_compute/runtime/CL/functions/CLActivationLayer.h
index a1aeb19..037ae71 100644
--- a/arm_compute/runtime/CL/functions/CLActivationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLActivationLayer.h
@@ -49,6 +49,16 @@
      * @param[in]      act_info Activation layer parameters.
      */
     void configure(ICLTensor *input, ICLTensor *output, ActivationLayerInfo act_info);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLActivationLayer
+     *
+     * @param[in] input    Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
+     *                     of the activation function. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output   Destination tensor info. Data type supported: same as @p input
+     * @param[in] act_info Activation layer information.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
 };
 }
 #endif /* __ARM_COMPUTE_CLACTIVATIONLAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
index f888256..0c54147 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
@@ -47,6 +47,16 @@
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAddition
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
 };
 }
 #endif /* __ARM_COMPUTE_CLARITHMETICADDITION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
index eedeaa8..749f1b2 100644
--- a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
+++ b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
@@ -48,6 +48,16 @@
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtraction
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8/QS8 (only if @p input1 is QS8), QS16 (only if @p input1 is QS16), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (only if both inputs are U8), QS8 (only if both inputs are QS8), QS16 (only if both inputs are QS16), S16/F16/F32.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
 };
 }
 #endif /* __ARM_COMPUTE_CLARITHMETICSUBTRACTION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
index ffb66be..70a201a 100644
--- a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
@@ -51,14 +51,32 @@
      * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
      *                         3 lower dimensions represent a single input with dimensions [width, height, FM].
      *                         The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]      mean    Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      var     Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      gamma   Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      beta    Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      epsilon Small value to avoid division with zero.
-     * @param[out]     output  Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      */
     void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayer
+     *
+     * @param[in] input   Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result.
+     *                    3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                    The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output  Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @param[in] mean    Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] var     Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] gamma   Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] beta    Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] epsilon Small value to avoid division with zero.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input, const ITensorInfo *output,
+                          const ITensorInfo *mean, const ITensorInfo *var,
+                          const ITensorInfo *beta, const ITensorInfo *gamma,
+                          float epsilon);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
index 71754fc..4fdcd75 100644
--- a/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
+++ b/arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h
@@ -37,15 +37,30 @@
 public:
     /** Initialise the kernel's inputs, output and convertion policy.
      *
-     * @param[in]  input1          First tensor input. Data types supported: U8, S16, F16 or F32.
-     * @param[in]  input2          Second tensor input. Data types supported: U8, S16, F16 or F32.
-     * @param[out] output          Output tensor. Data types supported: U8(Only if both inputs are U8), S16, F16 or F32.
-     * @param[in]  scale           Scale to apply after multiplication. Must be positive.
+     * @param[in]  input1          An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in]  input2          An input tensor. Data types supported: same as @p input1.
+     * @param[out] output          The output tensor. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+     * @param[in]  scale           Scale to apply after multiplication.
+     *                             Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
      * @param[in]  overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
      * @param[in]  rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
      */
     void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
                    ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLPixelWiseMultiplication
+     *
+     * @param[in] input1          An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+     * @param[in] input2          An input tensor info. Data types supported: same as @p input1.
+     * @param[in] output          The output tensor info. Data types supported: same as @p input1. Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+     * @param[in] scale           Scale to apply after multiplication.
+     *                            Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+     * @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
+     * @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
+     *
+     * @return an error status
+     */
+    static Error validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale,
+                          ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
 };
 }
 #endif /*__ARM_COMPUTE_CLPIXELWISEMULTIPLICATION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLPoolingLayer.h b/arm_compute/runtime/CL/functions/CLPoolingLayer.h
index f7fd114..9c51534 100644
--- a/arm_compute/runtime/CL/functions/CLPoolingLayer.h
+++ b/arm_compute/runtime/CL/functions/CLPoolingLayer.h
@@ -50,11 +50,11 @@
     void configure(ICLTensor *input, ICLTensor *output, const PoolingLayerInfo &pool_info);
     /** Static function to check if given info will lead to a valid configuration of @ref CLPoolingLayer
      *
-     * @param[in] input     Input's tensor info
-     * @param[in] output    Output's tensor info
+     * @param[in] input     Source tensor info. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] output    Destination tensor info. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      *
-     * @return an expected value
+     * @return an error status
      */
     static Error validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info);
 };