COMPMID-1717: CL: Implement Maximum, Minimum, SquaredDifference

Change-Id: Ice653e48211053bd3cd20a693bd76de6b4efc370
Reviewed-on: https://review.mlplatform.org/270
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
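
Illustrative usage of the new runtime functions (not part of this patch; the shapes, data type
and fill step below are placeholders chosen only for the example):

    #include "arm_compute/core/Error.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    int main()
    {
        // Set up the default OpenCL context and queue.
        CLScheduler::get().default_init();

        // Placeholder shape/type, chosen only for illustration.
        const TensorInfo info(TensorShape(16U, 16U), 1, DataType::F32);
        CLTensor a, b, out_max, out_sqdiff;
        a.allocator()->init(info);
        b.allocator()->init(info);
        out_max.allocator()->init(info);
        out_sqdiff.allocator()->init(info);

        // validate() mirrors configure() and can be called before any allocation.
        ARM_COMPUTE_ERROR_THROW_ON(CLElementwiseMax::validate(a.info(), b.info(), out_max.info()));
        ARM_COMPUTE_ERROR_THROW_ON(CLElementwiseSquaredDiff::validate(a.info(), b.info(), out_sqdiff.info()));

        CLElementwiseMax         max_op;
        CLElementwiseSquaredDiff sqdiff_op;
        max_op.configure(&a, &b, &out_max);
        sqdiff_op.configure(&a, &b, &out_sqdiff);

        a.allocator()->allocate();
        b.allocator()->allocate();
        out_max.allocator()->allocate();
        out_sqdiff.allocator()->allocate();

        // ... map the input tensors and fill a and b here ...

        max_op.run();
        sqdiff_op.run();
        CLScheduler::get().sync();
        return 0;
    }
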
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index c7c1297..c707265 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -28,9 +28,6 @@
 #include "arm_compute/core/CL/kernels/CLAbsoluteDifferenceKernel.h"
 #include "arm_compute/core/CL/kernels/CLAccumulateKernel.h"
 #include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
-#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
-#include "arm_compute/core/CL/kernels/CLArithmeticDivisionKernel.h"
-#include "arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h"
 #include "arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLBatchToSpaceLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLBitwiseAndKernel.h"
@@ -62,6 +59,7 @@
 #include "arm_compute/core/CL/kernels/CLDilateKernel.h"
 #include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLDirectConvolutionLayerOutputStageKernel.h"
+#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
 #include "arm_compute/core/CL/kernels/CLErodeKernel.h"
 #include "arm_compute/core/CL/kernels/CLFastCornersKernel.h"
 #include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
deleted file mode 100644
index 48e72f3..0000000
--- a/arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2016-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLARITHMETICADDITIONKERNEL_H__
-#define __ARM_COMPUTE_CLARITHMETICADDITIONKERNEL_H__
-
-#include "arm_compute/core/CL/ICLKernel.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the arithmetic addition kernel
- *
- * Arithmetic addition is computed by:
- * @f[ output(x,y) = input1(x,y) + input2(x,y) @f]
- */
-class CLArithmeticAdditionKernel : public ICLKernel
-{
-public:
-    /** Default constructor */
-    CLArithmeticAdditionKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLArithmeticAdditionKernel(const CLArithmeticAdditionKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLArithmeticAdditionKernel &operator=(const CLArithmeticAdditionKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    CLArithmeticAdditionKernel(CLArithmeticAdditionKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    CLArithmeticAdditionKernel &operator=(CLArithmeticAdditionKernel &&) = default;
-    /** Default destructor */
-    ~CLArithmeticAdditionKernel() = default;
-    /** Initialise the kernel's inputs, output and conversion policy.
-     *
-     * @param[in]  input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[in]  input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
-     * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
-     * @param[in]  policy Policy to use to handle overflow.
-     */
-    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAdditionKernel
-     *
-     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
-     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
-     * @param[in] policy Policy to use to handle overflow.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
-
-    // Inherited methods overridden:
-    void run(const Window &window, cl::CommandQueue &queue) override;
-    BorderSize border_size() const override;
-
-private:
-    const ICLTensor *_input1; /**< Source tensor 1 */
-    const ICLTensor *_input2; /**< Source tensor 2 */
-    ICLTensor       *_output; /**< Destination tensor */
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_CLARITHMETICADDITIONKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLArithmeticDivisionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticDivisionKernel.h
deleted file mode 100644
index 430a641..0000000
--- a/arm_compute/core/CL/kernels/CLArithmeticDivisionKernel.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLARITHMETICDIVISIONKERNEL_H__
-#define __ARM_COMPUTE_CLARITHMETICDIVISIONKERNEL_H__
-
-#include "arm_compute/core/CL/ICLKernel.h"
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the arithmetic division kernel
- *
- * Arithmetic division is computed by:
- * @f[ output(x,y) = input1(x,y) / input2(x,y) @f]
- */
-class CLArithmeticDivisionKernel : public ICLKernel
-{
-public:
-    /** Default constructor */
-    CLArithmeticDivisionKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLArithmeticDivisionKernel(const CLArithmeticDivisionKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLArithmeticDivisionKernel &operator=(const CLArithmeticDivisionKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    CLArithmeticDivisionKernel(CLArithmeticDivisionKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    CLArithmeticDivisionKernel &operator=(CLArithmeticDivisionKernel &&) = default;
-    /** Default destructor */
-    ~CLArithmeticDivisionKernel() = default;
-    /** Initialise the kernel's inputs, output.
-     *
-     * @param[in]  input1 First tensor input. Data types supported: F16/F32.
-     * @param[in]  input2 Second tensor input. Data types supported: Same as @p input1.
-     * @param[out] output Output tensor. Data types supported: Same as @p input1.
-     */
-    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivisionKernel
-     *
-     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
-     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
-     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
-
-    // Inherited methods overridden:
-    void run(const Window &window, cl::CommandQueue &queue) override;
-    BorderSize border_size() const override;
-
-private:
-    const ICLTensor *_input1; /**< Source tensor 1 */
-    const ICLTensor *_input2; /**< Source tensor 2 */
-    ICLTensor       *_output; /**< Destination tensor */
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_CLARITHMETICDIVISIONKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h b/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
deleted file mode 100644
index 9875ac7..0000000
--- a/arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2016-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLARITHMETICSUBTRACTIONKERNEL_H__
-#define __ARM_COMPUTE_CLARITHMETICSUBTRACTIONKERNEL_H__
-
-#include "arm_compute/core/CL/ICLKernel.h"
-
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Interface for the arithmetic subtraction kernel
- *
- * Arithmetic subtraction is computed by:
- * @f[ output(x,y) = input1(x,y) - input2(x,y) @f]
- */
-class CLArithmeticSubtractionKernel : public ICLKernel
-{
-public:
-    /** Default constructor */
-    CLArithmeticSubtractionKernel();
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLArithmeticSubtractionKernel(const CLArithmeticSubtractionKernel &) = delete;
-    /** Prevent instances of this class from being copied (As this class contains pointers) */
-    CLArithmeticSubtractionKernel &operator=(const CLArithmeticSubtractionKernel &) = delete;
-    /** Allow instances of this class to be moved */
-    CLArithmeticSubtractionKernel(CLArithmeticSubtractionKernel &&) = default;
-    /** Allow instances of this class to be moved */
-    CLArithmeticSubtractionKernel &operator=(CLArithmeticSubtractionKernel &&) = default;
-    /** Default destructor */
-    ~CLArithmeticSubtractionKernel() = default;
-
-    /** Initialise the kernel's inputs, output and conversion policy.
-     *
-     * @param[in]  input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[in]  input2 Second tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8/S16/F16/F32.
-     * @param[in]  policy Policy to use to handle overflow.
-     */
-    void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtractionKernel
-     *
-     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[in] input2 Second tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8/S16/F16/F32.
-     * @param[in] policy Policy to use to handle overflow.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
-
-    // Inherited methods overridden:
-    void run(const Window &window, cl::CommandQueue &queue) override;
-    BorderSize border_size() const override;
-
-private:
-    const ICLTensor *_input1; /**< Source tensor 1 */
-    const ICLTensor *_input2; /**< Source tensor 2 */
-    ICLTensor       *_output; /**< Destination tensor */
-};
-} // namespace arm_compute
-#endif /* __ARM_COMPUTE_CLARITHMETICSUBTRACTIONKERNEL_H__ */
diff --git a/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
new file mode 100644
index 0000000..2c65789
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLELEMENTWISEOPERATIONKERNEL_H__
+#define __ARM_COMPUTE_CLELEMENTWISEOPERATIONKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for an element-wise operation kernel
+ *
+ * Element-wise operation is computed by:
+ * @f[ output(x,y) = OP(input1(x,y), input2(x,y))@f]
+ *
+ */
+class CLElementwiseOperationKernel : public ICLKernel
+{
+public:
+    /** Default constructor */
+    CLElementwiseOperationKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLElementwiseOperationKernel(const CLElementwiseOperationKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLElementwiseOperationKernel &operator=(const CLElementwiseOperationKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLElementwiseOperationKernel(CLElementwiseOperationKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLElementwiseOperationKernel &operator=(CLElementwiseOperationKernel &&) = default;
+    /** Default destructor */
+    ~CLElementwiseOperationKernel() = default;
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+    BorderSize border_size() const override;
+
+protected:
+    /** The name of the operation */
+    virtual std::string name() = 0;
+
+    /** Validate the inputs and configure the kernel's window.
+     *
+     * @param[in] input1 First tensor input. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor. Data types supported: Same as @p input1.
+     *
+     * @return a pair of Status and Window
+     */
+    virtual std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output) = 0;
+
+    /** Validate the arguments passed to the kernel
+     *
+     * @param[in] input1 First tensor input. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] input2 Second tensor input. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor. Data types supported: Same as @p input1.
+     */
+    virtual Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) = 0;
+
+    /** Generate the build options for the specific kernel
+     *
+     * @return a CLBuildOptions struct
+     */
+    virtual CLBuildOptions generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) = 0;
+
+    /** Generate the identifier for tuning
+     *
+     * @return a string
+     */
+    virtual std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output) = 0;
+
+    /** Common configure function for element-wise operators with no additional options (e.g., Div, Min, Max, SquaredDiff)
+     *
+     */
+    void configure_common(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+
+private:
+    const ICLTensor *_input1; /**< Source tensor 1 */
+    const ICLTensor *_input2; /**< Source tensor 2 */
+    ICLTensor       *_output; /**< Destination tensor */
+};
+
+/** Kernel for element-wise arithmetic operations that take a ConvertPolicy to handle overflow (e.g. ADD, SUB) */
+class CLSaturatedArithmeticOperationKernel : public CLElementwiseOperationKernel
+{
+public:
+    CLSaturatedArithmeticOperationKernel()
+        : CLElementwiseOperationKernel(), _policy(), _op()
+    {
+    }
+
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in]  op     Arithmetic operation to be executed.
+     * @param[in]  input1 First tensor input. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in]  input2 Second tensor input. Data types supported: Same as @p input1.
+     * @param[out] output Output tensor. Data types supported: Same as @p input1.
+     * @param[in]  policy Policy to use to handle overflow.
+     */
+    void configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, const ConvertPolicy &policy);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel
+     *
+     * @param[in] op     Arithmetic operation to be executed.
+     * @param[in] input1 First tensor input info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return a Status
+     */
+    static Status validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, const ConvertPolicy &policy);
+
+protected:
+    // Inherited methods overridden:
+    std::string name() override;
+    std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output) override;
+    Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) override;
+    CLBuildOptions generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) override;
+    std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output) override;
+
+private:
+    ConvertPolicy       _policy;
+    ArithmeticOperation _op;
+};
+
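+/** Kernel for element-wise arithmetic operations that take no conversion policy (e.g. DIV, MIN, MAX, SQUARED_DIFF) */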
+class CLArithmeticOperationKernel : public CLElementwiseOperationKernel
+{
+public:
+    CLArithmeticOperationKernel()
+        : CLElementwiseOperationKernel(), _op()
+    {
+    }
+
+    /** Initialise the kernel's inputs and output.
+     *
+     * @param[in]  op     Arithmetic operation to be executed.
+     * @param[in]  input1 First tensor input. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in]  input2 Second tensor input. Data types supported: Same as @p input1.
+     * @param[out] output Output tensor. Data types supported: Same as @p input1.
+     */
+    void configure(ArithmeticOperation op, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel
+     *
+     * @param[in] op     Arithmetic operation to be executed.
+     * @param[in] input1 First tensor input info. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
+     *
+     * @return a Status
+     */
+    static Status validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+
+protected:
+    // Inherited methods overridden:
+    std::string name() override;
+    std::pair<Status, Window> validate_and_configure_window(ITensorInfo &input1, ITensorInfo &input2, ITensorInfo &output) override;
+    Status validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) override;
+    CLBuildOptions generate_build_options(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output) override;
+    std::string generate_id_for_tuning(const std::string &kernel_name, const ITensorInfo &input1, const ITensorInfo &output) override;
+
+private:
+    ArithmeticOperation _op;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLELEMENTWISEOPERATIONKERNEL_H__ */
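
The kernel classes above follow a template-method pattern: the concrete kernels implement the
protected hooks (name(), validate_and_configure_window(), generate_build_options(),
generate_id_for_tuning()) and reuse configure_common(). A minimal sketch (not part of the patch)
of driving CLArithmeticOperationKernel directly with one of the new ArithmeticOperation values,
assuming already-initialised and allocated F32 CL tensors; the runtime functions in
CLElementwiseOperations.h are the intended entry point:

    #include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
    #include "arm_compute/core/Error.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"

    // Assumes a, b and out are already initialised/allocated CL tensors of matching F32 shape.
    void run_squared_diff(arm_compute::ICLTensor &a, arm_compute::ICLTensor &b, arm_compute::ICLTensor &out)
    {
        using namespace arm_compute;

        // validate() mirrors configure(); check the configuration first.
        ARM_COMPUTE_ERROR_THROW_ON(
            CLArithmeticOperationKernel::validate(ArithmeticOperation::SQUARED_DIFF, a.info(), b.info(), out.info()));

        CLArithmeticOperationKernel kernel;
        kernel.configure(ArithmeticOperation::SQUARED_DIFF, &a, &b, &out);

        // Enqueue on the default command queue and wait for the result.
        CLScheduler::get().enqueue(kernel);
        CLScheduler::get().sync();
    }
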
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 7db2f5f..7d632fe 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -552,6 +552,17 @@
     ARG_IDX_MIN  /**< Index of the min value */
 };
 
+/** Available element-wise operations */
+enum class ArithmeticOperation
+{
+    ADD,          /**< (x + y) */
+    SUB,          /**< (x - y) */
+    DIV,          /**< (x / y) */
+    MIN,          /**< Min(x, y) */
+    MAX,          /**< Max(x, y) */
+    SQUARED_DIFF, /**< (x - y)^2 */
+};
+
 /** The normalization type used for the normalization layer */
 enum class NormType
 {
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 780597e..e68e719 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -29,9 +29,6 @@
 #include "arm_compute/runtime/CL/functions/CLAccumulate.h"
 #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
 #include "arm_compute/runtime/CL/functions/CLArgMinMaxLayer.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticAddition.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticDivision.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h"
 #include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
 #include "arm_compute/runtime/CL/functions/CLBatchToSpaceLayer.h"
 #include "arm_compute/runtime/CL/functions/CLBitwiseAnd.h"
@@ -63,6 +60,7 @@
 #include "arm_compute/runtime/CL/functions/CLDerivative.h"
 #include "arm_compute/runtime/CL/functions/CLDilate.h"
 #include "arm_compute/runtime/CL/functions/CLDirectConvolutionLayer.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLEqualizeHistogram.h"
 #include "arm_compute/runtime/CL/functions/CLErode.h"
 #include "arm_compute/runtime/CL/functions/CLFastCorners.h"
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h b/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
deleted file mode 100644
index 5aba60a..0000000
--- a/arm_compute/runtime/CL/functions/CLArithmeticAddition.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2016-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLARITHMETICADDITION_H__
-#define __ARM_COMPUTE_CLARITHMETICADDITION_H__
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Basic function to run @ref CLArithmeticAdditionKernel
- *
- * @note The tensor data type for the inputs must be U8/S16/F16/F32.
- * @note The function performs an arithmetic addition between two tensors.
- */
-class CLArithmeticAddition : public ICLSimpleFunction
-{
-public:
-    /** Initialise the kernel's inputs, output and convertion policy.
-     *
-     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
-     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
-     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[out]     output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
-     * @param[in]      policy Policy to use to handle overflow.
-     */
-    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticAddition
-     *
-     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
-     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
-     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 ( only if both inputs are QASYMM8), S16/F16/F32.
-     * @param[in] policy Policy to use to handle overflow.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
-};
-}
-#endif /* __ARM_COMPUTE_CLARITHMETICADDITION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticDivision.h b/arm_compute/runtime/CL/functions/CLArithmeticDivision.h
deleted file mode 100644
index c91435c..0000000
--- a/arm_compute/runtime/CL/functions/CLArithmeticDivision.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLARITHMETICDIVISION_H__
-#define __ARM_COMPUTE_CLARITHMETICDIVISION_H__
-
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Basic function to run @ref CLArithmeticDivisionKernel
- *
- * @note The tensor data type for the inputs must be F16/F32.
- * @note The function performs an arithmetic division between two tensors.
- */
-class CLArithmeticDivision : public ICLSimpleFunction
-{
-public:
-    /** Initialise the kernel's inputs, output.
-     *
-     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
-     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[in, out] input2 Second tensor input. Same as @p input1.
-     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[out]     output Output tensor. Data types supported: Same as @p input1.
-     */
-    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
-     *
-     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
-     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
-     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
-};
-}
-#endif /* __ARM_COMPUTE_CLARITHMETICDIVISION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h b/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
deleted file mode 100644
index 2940044..0000000
--- a/arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2016-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_CLARITHMETICSUBTRACTION_H__
-#define __ARM_COMPUTE_CLARITHMETICSUBTRACTION_H__
-
-#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-
-#include "arm_compute/core/Types.h"
-
-namespace arm_compute
-{
-class ICLTensor;
-
-/** Basic function to run @ref CLArithmeticSubtractionKernel
- *
- * @note The tensor data type for the inputs must be U8/S16/F16/F32.
- * @note The function performs an arithmetic subtraction between two tensors.
- *
- *  This function calls the following kernels:
- * -# @ref CLFillBorderKernel (In case of broadcasting, in the input being broadcasted)
- * -# @ref CLArithmeticSubtractionKernel
- */
-class CLArithmeticSubtraction : public ICLSimpleFunction
-{
-public:
-    /** Initialise the kernel's inputs, output and convertion policy.
-     *
-     * @param[in]  input1 First tensor input. Data types supported: U8/S16/F16/F32.
-     * @param[in]  input2 Second tensor input. Data types supported: U8/S16/F16/F32.
-     * @param[out] output Output tensor. Data types supported: U8 (Only if both inputs are U8), S16/F16/F32.
-     * @param[in]  policy Policy to use to handle overflow.
-     */
-    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
-    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticSubtraction
-     *
-     * @param[in] input1 First tensor input info. Data types supported: U8/S16/F16/F32.
-     * @param[in] input2 Second tensor input info. Data types supported: U8/S16/F16/F32.
-     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), S16/F16/F32.
-     * @param[in] policy Policy to use to handle overflow.
-     *
-     * @return a status
-     */
-    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
-};
-}
-#endif /* __ARM_COMPUTE_CLARITHMETICSUBTRACTION_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLElementwiseOperations.h b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
new file mode 100644
index 0000000..4a0911e
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLElementwiseOperations.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H__
+#define __ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H__
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for addition
+ *
+ * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/U32/F16/F32.
+ * @note The function performs an arithmetic addition between two tensors.
+ */
+class CLArithmeticAddition : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     * @param[in]      policy Policy to use to handle overflow.
+     */
+    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for addition
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
+};
+
+/** Basic function to run @ref CLSaturatedArithmeticOperationKernel for subtraction
+ *
+ * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/U32/F16/F32.
+ * @note The function performs an arithmetic subtraction between two tensors.
+ */
+class CLArithmeticSubtraction : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     * @param[in]      policy Policy to use to handle overflow.
+     */
+    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, ConvertPolicy policy);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLSaturatedArithmeticOperationKernel for subtraction
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     * @param[in] policy Policy to use to handle overflow.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy);
+};
+
+/** Basic function to run @ref CLArithmeticOperationKernel for division
+ *
+ * @note The tensor data type for the inputs must be F16/F32.
+ * @note The function performs an arithmetic division between two tensors.
+ */
+class CLArithmeticDivision : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output.
+     *
+     * @param[in, out] input1 First tensor input. Data types supported: F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2 Second tensor input. Same as @p input1.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output Output tensor. Data types supported: Same as @p input1.
+     */
+    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticDivision
+     *
+     * @param[in] input1 First tensor input info. Data types supported: F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: Same as @p input1.
+     * @param[in] output Output tensor info. Data types supported: Same as @p input1.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
+
+/** Basic function to run @ref CLArithmeticOperationKernel for max
+ *
+ * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/U32/F16/F32.
+ * @note The function performs a max operation between two tensors.
+ */
+class CLElementwiseMax : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     */
+    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for max
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
+
+/** Basic function to run @ref CLArithmeticOperationKernel for min
+ *
+ * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/U32/F16/F32.
+ * @note The function performs a min operation between two tensors.
+ */
+class CLElementwiseMin : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     */
+    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for min
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/S32/U32/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
+
+/** Basic function to run @ref CLArithmeticOperationKernel for squared difference
+ *
+ * @note The tensor data type for the inputs must be QASYMM8/U8/S16/F16/F32.
+ * @note The function performs a squared difference operation between two tensors (i.e., out[i] = (in1[i] - in2[i])^2).
+ */
+class CLElementwiseSquaredDiff : public ICLSimpleFunction
+{
+public:
+    /** Initialise the kernel's inputs, output and conversion policy.
+     *
+     * @param[in, out] input1 First tensor input. Data types supported: U8/QASYMM8/S16/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] input2 Second tensor input. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     *                        The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     output Output tensor. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     */
+    void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
+    /** Static function to check if given info will lead to a valid configuration of @ref CLArithmeticOperationKernel for squared difference
+     *
+     * @param[in] input1 First tensor input info. Data types supported: U8/QASYMM8/S16/F16/F32.
+     * @param[in] input2 Second tensor input info. Data types supported: U8, QASYMM8 (only if @p input1 is QASYMM8), S16/F16/F32.
+     * @param[in] output Output tensor info. Data types supported: U8 (Only if both inputs are U8), QASYMM8 (only if both inputs are QASYMM8), S16/F16/F32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output);
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLELEMENTWISEOPERATIONS_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
index fbf0c08..1468b15 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMConvolutionLayer.h
@@ -26,8 +26,8 @@
 
 #include "arm_compute/runtime/IFunction.h"
 
-#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
 #include "arm_compute/core/CL/kernels/CLCol2ImKernel.h"
+#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMInterleave4x4Kernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMMatrixMultiplyKernel.h"
 #include "arm_compute/core/CL/kernels/CLGEMMTranspose1xWKernel.h"
@@ -90,7 +90,7 @@
  * -# @ref CLGEMM (if the data type is FP32 or FP16)
  * -# @ref CLGEMMLowpMatrixMultiplyCore (if the data type is QASYMM8)
  * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint (if the data type is QASYMM8)
- * -# @ref CLArithmeticAdditionKernel (if biases != nullptr and we have a 1x1 convolution with the NHWC data layout)
+ * -# @ref CLElementwiseOperationKernel for addition (if biases != nullptr and we have a 1x1 convolution with the NHWC data layout)
  * -# @ref CLCol2ImKernel (if NCHW data layout)
  */
 class CLGEMMConvolutionLayer : public IFunction
@@ -185,14 +185,14 @@
                               int gemm_3d_depth = 1, bool skip_im2col = false);
 
 private:
-    CLMemoryGroup                    _memory_group;
-    CLConvolutionLayerReshapeWeights _reshape_weights;
-    CLIm2ColKernel                   _im2col_kernel;
-    CLGEMM                           _mm_gemm;
-    CLGEMMLowpMatrixMultiplyCore     _mm_gemmlowp;
-    CLCol2ImKernel                   _col2im_kernel;
-    CLActivationLayer                _activationlayer_function;
-    CLArithmeticAdditionKernel       _add_bias_kernel;
+    CLMemoryGroup                        _memory_group;
+    CLConvolutionLayerReshapeWeights     _reshape_weights;
+    CLIm2ColKernel                       _im2col_kernel;
+    CLGEMM                               _mm_gemm;
+    CLGEMMLowpMatrixMultiplyCore         _mm_gemmlowp;
+    CLCol2ImKernel                       _col2im_kernel;
+    CLActivationLayer                    _activationlayer_function;
+    CLSaturatedArithmeticOperationKernel _add_bias_kernel;
 
     const ICLTensor *_original_weights;
 
diff --git a/arm_compute/runtime/CL/functions/CLLSTMLayer.h b/arm_compute/runtime/CL/functions/CLLSTMLayer.h
index 72e41a7..87fb119 100644
--- a/arm_compute/runtime/CL/functions/CLLSTMLayer.h
+++ b/arm_compute/runtime/CL/functions/CLLSTMLayer.h
@@ -27,14 +27,13 @@
 #include "arm_compute/runtime/IFunction.h"
 
 #include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
-#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
-#include "arm_compute/core/CL/kernels/CLArithmeticSubtractionKernel.h"
 #include "arm_compute/core/CL/kernels/CLCopyKernel.h"
+#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
 #include "arm_compute/core/CL/kernels/CLPixelWiseMultiplicationKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLMemoryGroup.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticAddition.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
 #include "arm_compute/runtime/CL/functions/CLGEMM.h"
 #include "arm_compute/runtime/CL/functions/CLWidthConcatenateLayer.h"
@@ -141,76 +140,76 @@
     void run() override;
 
 private:
-    CLMemoryGroup                   _memory_group;
-    CLFullyConnectedLayer           _fully_connected_input_gate;
-    CLGEMM                          _gemm_input_gate;
-    CLTransposeKernel               _transpose_input_gate;
-    CLArithmeticAdditionKernel      _accum_input_gate1;
-    CLArithmeticAddition            _accum_input_gate2;
-    CLArithmeticSubtractionKernel   _subtract_input_gate;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_input_gate;
-    CLActivationLayerKernel         _activation_input_gate;
-    CLFullyConnectedLayer           _fully_connected_forget_gate;
-    CLGEMM                          _gemm_forget_gate;
-    CLTransposeKernel               _transpose_forget_gate;
-    CLArithmeticAdditionKernel      _accum_forget_gate1;
-    CLArithmeticAddition            _accum_forget_gate2;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_forget_gate;
-    CLActivationLayerKernel         _activation_forget_gate;
-    CLFullyConnectedLayer           _fully_connected_cell_state;
-    CLGEMM                          _gemm_cell_state1;
-    CLGEMM                          _gemm_cell_state2;
-    CLTransposeKernel               _transpose_cell_state;
-    CLArithmeticAdditionKernel      _accum_cell_state1;
-    CLArithmeticAdditionKernel      _accum_cell_state2;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state1;
-    CLActivationLayerKernel         _activation_cell_state;
-    CLActivationLayerKernel         _cell_clip;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_cell_state2;
-    CLFullyConnectedLayer           _fully_connected_output;
-    CLGEMM                          _gemm_output;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state1;
-    CLTransposeKernel               _transpose_output;
-    CLArithmeticAdditionKernel      _accum_output1;
-    CLArithmeticAddition            _accum_output2;
-    CLActivationLayerKernel         _activation_output;
-    CLActivationLayerKernel         _activation_output_state;
-    CLPixelWiseMultiplicationKernel _pixelwise_mul_output_state2;
-    CLFullyConnectedLayer           _fully_connected_output_state;
-    CLGEMM                          _gemm_output_state;
-    CLArithmeticAdditionKernel      _accum_output_state;
-    CLActivationLayerKernel         _projection_clip;
-    CLCopyKernel                    _copy_cell_state;
-    CLCopyKernel                    _copy_output;
-    CLWidthConcatenateLayer         _concat_scratch_buffer;
-    CLTensor                        _input_gate_out1;
-    CLTensor                        _input_gate_out2;
-    CLTensor                        _input_gate_out3;
-    CLTensor                        _input_gate_out4;
-    CLTensor                        _input_gate_out5;
-    CLTensor                        _forget_gate_out1;
-    CLTensor                        _forget_gate_out2;
-    CLTensor                        _forget_gate_out3;
-    CLTensor                        _forget_gate_out4;
-    CLTensor                        _forget_gate_out5;
-    CLTensor                        _cell_state_out1;
-    CLTensor                        _cell_state_out2;
-    CLTensor                        _cell_state_out3;
-    CLTensor                        _cell_state_out4;
-    CLTensor                        _cell_state_out5;
-    CLTensor                        _output1;
-    CLTensor                        _output2;
-    CLTensor                        _output3;
-    CLTensor                        _output4;
-    CLTensor                        _output5;
-    CLTensor                        _cell_state_activation;
-    CLTensor                        _output_state1;
-    CLTensor                        _ones;
-    bool                            _run_peephole_opt;
-    bool                            _run_cifg_opt;
-    bool                            _perform_cell_clipping;
-    bool                            _has_projection_weights;
-    bool                            _perform_projection_clipping;
+    CLMemoryGroup                        _memory_group;
+    CLFullyConnectedLayer                _fully_connected_input_gate;
+    CLGEMM                               _gemm_input_gate;
+    CLTransposeKernel                    _transpose_input_gate;
+    CLSaturatedArithmeticOperationKernel _accum_input_gate1;
+    CLArithmeticAddition                 _accum_input_gate2;
+    CLSaturatedArithmeticOperationKernel _subtract_input_gate;
+    CLPixelWiseMultiplicationKernel      _pixelwise_mul_input_gate;
+    CLActivationLayerKernel              _activation_input_gate;
+    CLFullyConnectedLayer                _fully_connected_forget_gate;
+    CLGEMM                               _gemm_forget_gate;
+    CLTransposeKernel                    _transpose_forget_gate;
+    CLSaturatedArithmeticOperationKernel _accum_forget_gate1;
+    CLArithmeticAddition                 _accum_forget_gate2;
+    CLPixelWiseMultiplicationKernel      _pixelwise_mul_forget_gate;
+    CLActivationLayerKernel              _activation_forget_gate;
+    CLFullyConnectedLayer                _fully_connected_cell_state;
+    CLGEMM                               _gemm_cell_state1;
+    CLGEMM                               _gemm_cell_state2;
+    CLTransposeKernel                    _transpose_cell_state;
+    CLSaturatedArithmeticOperationKernel _accum_cell_state1;
+    CLSaturatedArithmeticOperationKernel _accum_cell_state2;
+    CLPixelWiseMultiplicationKernel      _pixelwise_mul_cell_state1;
+    CLActivationLayerKernel              _activation_cell_state;
+    CLActivationLayerKernel              _cell_clip;
+    CLPixelWiseMultiplicationKernel      _pixelwise_mul_cell_state2;
+    CLFullyConnectedLayer                _fully_connected_output;
+    CLGEMM                               _gemm_output;
+    CLPixelWiseMultiplicationKernel      _pixelwise_mul_output_state1;
+    CLTransposeKernel                    _transpose_output;
+    CLSaturatedArithmeticOperationKernel _accum_output1;
+    CLArithmeticAddition                 _accum_output2;
+    CLActivationLayerKernel              _activation_output;
+    CLActivationLayerKernel              _activation_output_state;
+    CLPixelWiseMultiplicationKernel      _pixelwise_mul_output_state2;
+    CLFullyConnectedLayer                _fully_connected_output_state;
+    CLGEMM                               _gemm_output_state;
+    CLSaturatedArithmeticOperationKernel _accum_output_state;
+    CLActivationLayerKernel              _projection_clip;
+    CLCopyKernel                         _copy_cell_state;
+    CLCopyKernel                         _copy_output;
+    CLWidthConcatenateLayer              _concat_scratch_buffer;
+    CLTensor                             _input_gate_out1;
+    CLTensor                             _input_gate_out2;
+    CLTensor                             _input_gate_out3;
+    CLTensor                             _input_gate_out4;
+    CLTensor                             _input_gate_out5;
+    CLTensor                             _forget_gate_out1;
+    CLTensor                             _forget_gate_out2;
+    CLTensor                             _forget_gate_out3;
+    CLTensor                             _forget_gate_out4;
+    CLTensor                             _forget_gate_out5;
+    CLTensor                             _cell_state_out1;
+    CLTensor                             _cell_state_out2;
+    CLTensor                             _cell_state_out3;
+    CLTensor                             _cell_state_out4;
+    CLTensor                             _cell_state_out5;
+    CLTensor                             _output1;
+    CLTensor                             _output2;
+    CLTensor                             _output3;
+    CLTensor                             _output4;
+    CLTensor                             _output5;
+    CLTensor                             _cell_state_activation;
+    CLTensor                             _output_state1;
+    CLTensor                             _ones;
+    bool                                 _run_peephole_opt;
+    bool                                 _run_cifg_opt;
+    bool                                 _perform_cell_clipping;
+    bool                                 _has_projection_weights;
+    bool                                 _perform_projection_clipping;
 };
 }
 #endif /* __ARM_COMPUTE_CLLSTMLAYER_H__ */
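Note: the member-type changes above (CLArithmeticAdditionKernel/CLArithmeticSubtractionKernel replaced by CLSaturatedArithmeticOperationKernel) imply matching changes to the configure calls in CLLSTMLayer.cpp: the operation is no longer baked into the kernel type but selected via the ArithmeticOperation enum. A minimal sketch of the expected wiring, with hypothetical tensor names, assuming the configure(ArithmeticOperation, input1, input2, output, ConvertPolicy) signature introduced by this patch:

    // Sketch only (not part of this hunk); tensor names are illustrative.
    // One kernel class now covers add/sub; the operation is passed explicitly.
    // ConvertPolicy::SATURATE keeps the saturating behaviour of the old
    // dedicated addition/subtraction kernels.
    _accum_cell_state1.configure(ArithmeticOperation::ADD, &cell_state1, &cell_state2, &cell_state_out, ConvertPolicy::SATURATE);
    _subtract_input_gate.configure(ArithmeticOperation::SUB, &ones, &forget_gate_out, &input_gate_out, ConvertPolicy::SATURATE);
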
diff --git a/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h b/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h
index 585a013..ae86e93 100644
--- a/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h
+++ b/arm_compute/runtime/CL/functions/CLLaplacianPyramid.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,8 +26,8 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLPyramid.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticSubtraction.h"
 #include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLGaussian5x5.h"
 #include "arm_compute/runtime/CL/functions/CLGaussianPyramid.h"
 #include "arm_compute/runtime/IFunction.h"
diff --git a/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h b/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
index 6905b03..622b049 100644
--- a/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
+++ b/arm_compute/runtime/CL/functions/CLLaplacianReconstruct.h
@@ -26,8 +26,8 @@
 
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLPyramid.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticAddition.h"
 #include "arm_compute/runtime/CL/functions/CLDepthConvertLayer.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLScale.h"
 #include "arm_compute/runtime/IFunction.h"
 
diff --git a/arm_compute/runtime/CL/functions/CLRNNLayer.h b/arm_compute/runtime/CL/functions/CLRNNLayer.h
index ab7407d..fc86992 100644
--- a/arm_compute/runtime/CL/functions/CLRNNLayer.h
+++ b/arm_compute/runtime/CL/functions/CLRNNLayer.h
@@ -25,8 +25,8 @@
 #define __ARM_COMPUTE_CLRNN_LAYER_H__
 
 #include "arm_compute/core/CL/kernels/CLActivationLayerKernel.h"
-#include "arm_compute/core/CL/kernels/CLArithmeticAdditionKernel.h"
 #include "arm_compute/core/CL/kernels/CLCopyKernel.h"
+#include "arm_compute/core/CL/kernels/CLElementwiseOperationKernel.h"
 #include "arm_compute/runtime/CL/ICLSimpleFunction.h"
 #include "arm_compute/runtime/CL/functions/CLFullyConnectedLayer.h"
 #include "arm_compute/runtime/CL/functions/CLGEMM.h"
@@ -72,16 +72,16 @@
     void prepare() override;
 
 private:
-    CLMemoryGroup              _memory_group;
-    CLGEMM                     _gemm_state_f;
-    CLArithmeticAdditionKernel _add_kernel;
-    CLActivationLayerKernel    _activation_kernel;
-    CLFullyConnectedLayer      _fully_connected_kernel;
-    CLCopyKernel               _copy_kernel;
-    CLTensor                   _fully_connected_out;
-    CLTensor                   _gemm_output;
-    CLTensor                   _add_output;
-    bool                       _is_prepared;
+    CLMemoryGroup                        _memory_group;
+    CLGEMM                               _gemm_state_f;
+    CLSaturatedArithmeticOperationKernel _add_kernel;
+    CLActivationLayerKernel              _activation_kernel;
+    CLFullyConnectedLayer                _fully_connected_kernel;
+    CLCopyKernel                         _copy_kernel;
+    CLTensor                             _fully_connected_out;
+    CLTensor                             _gemm_output;
+    CLTensor                             _add_output;
+    bool                                 _is_prepared;
 };
 }
 #endif /* __ARM_COMPUTE_CLRNN_LAYER_H__ */
diff --git a/arm_compute/runtime/CL/functions/CLReduceMean.h b/arm_compute/runtime/CL/functions/CLReduceMean.h
index 5a919e5..ba10134 100644
--- a/arm_compute/runtime/CL/functions/CLReduceMean.h
+++ b/arm_compute/runtime/CL/functions/CLReduceMean.h
@@ -25,7 +25,7 @@
 #define __ARM_COMPUTE_CL_REDUCE_MEAN_H__
 
 #include "arm_compute/runtime/CL/ICLSimpleFunction.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticDivision.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "arm_compute/runtime/CL/functions/CLReductionOperation.h"
 #include "arm_compute/runtime/CL/functions/CLReshapeLayer.h"
 #include "arm_compute/runtime/IMemoryManager.h"