COMPMID-584: Add validation to channel_combine kernels

Change-Id: I67fe3fcea08704d9f4b04d22fe34db83b2697b87
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/110562
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
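
The configure() methods below previously relied on set_format_if_unknown() /
set_shape_if_empty() plus ad-hoc shape checks; they now validate their inputs up
front via a new ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED macro. The recurring
pattern, as it appears in the single-planar configure() paths:

    const Format output_format = output->info()->format();

    // Packed 4:2:2 outputs: the Y plane must have an even horizontal dimension and
    // the U and V planes must match the horizontally sub-sampled shape of the Y plane
    if(Format::YUYV422 == output_format || Format::UYVY422 == output_format)
    {
        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_EVEN(output_format, plane0, output);
        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), plane1, plane2);
    }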
diff --git a/arm_compute/core/Validate.h b/arm_compute/core/Validate.h
index 8a02574..d7a9048 100644
--- a/arm_compute/core/Validate.h
+++ b/arm_compute/core/Validate.h
@@ -320,6 +320,39 @@
 #define ARM_COMPUTE_RETURN_ERROR_ON_TENSORS_NOT_EVEN(...) \
     ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_tensors_not_even(__func__, __FILE__, __LINE__, __VA_ARGS__))
 
+/** Return an error if the passed tensors do not match the expected sub-sampled shape.
+ *
+ * @param[in] function Function in which the error occurred.
+ * @param[in] file     Name of the file where the error occurred.
+ * @param[in] line     Line on which the error occurred.
+ * @param[in] format   Format used to calculate the sub-sampled shape.
+ * @param[in] shape    The full tensor shape from which the sub-sampled shape is calculated.
+ * @param[in] tensor1  The first tensor to be checked.
+ * @param[in] tensors  (Optional) Further tensors to be checked.
+ *
+ * @return Status
+ */
+template <typename... Ts>
+arm_compute::Status error_on_tensors_not_subsampled(const char *function, const char *file, int line,
+                                                    const Format &format, const TensorShape &shape, const ITensor *tensor1, Ts... tensors)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_LOC(tensor1 == nullptr, function, file, line);
+    ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_nullptr(function, file, line, std::forward<Ts>(tensors)...));
+    const TensorShape sub2_shape = calculate_subsampled_shape(shape, format);
+    const std::array < const ITensor *, 1 + sizeof...(Ts) > tensors_info_array{ { tensor1, std::forward<Ts>(tensors)... } };
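+    // Flag an error if any tensor does not match the expected sub-sampled shape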
+    ARM_COMPUTE_RETURN_ERROR_ON_LOC_MSG(std::any_of(tensors_info_array.cbegin(), tensors_info_array.cend(), [&](const ITensor * tensor)
+    {
+        return detail::have_different_dimensions(tensor->info()->tensor_shape(), sub2_shape, 2);
+    }),
+    function, file, line, "Tensor shapes have mismatching dimensions for sub-sampling");
+    return arm_compute::Status{};
+}
+
+#define ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(...) \
+    ARM_COMPUTE_ERROR_THROW_ON(::arm_compute::error_on_tensors_not_subsampled(__func__, __FILE__, __LINE__, __VA_ARGS__))
+#define ARM_COMPUTE_RETURN_ERROR_ON_TENSORS_NOT_SUBSAMPLED(...) \
+    ARM_COMPUTE_RETURN_ON_ERROR(::arm_compute::error_on_tensors_not_subsampled(__func__, __FILE__, __LINE__, __VA_ARGS__))
+
 /** Return an error if the passed two tensor infos have different shapes from the given dimension
  *
  * @param[in] function      Function in which the error occurred.
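
For reference, the shape the new check compares against comes from
calculate_subsampled_shape(). Assuming its usual behaviour (the packed 4:2:2
formats halve the width only, while NV12/NV21/IYUV halve both width and
height), a minimal sketch:

    TensorShape shape(8U, 4U);
    calculate_subsampled_shape(shape, Format::YUYV422); // -> (4, 4): U/V sub-sampled horizontally
    calculate_subsampled_shape(shape, Format::NV12);    // -> (4, 2): U/V sub-sampled in both dimensions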
diff --git a/src/core/CL/cl_kernels/channel_combine.cl b/src/core/CL/cl_kernels/channel_combine.cl
index d309812..4207414 100644
--- a/src/core/CL/cl_kernels/channel_combine.cl
+++ b/src/core/CL/cl_kernels/channel_combine.cl
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -338,9 +338,9 @@
     uchar8 data2 = vload8(0, src_plane2.ptr);
 
 #ifdef NV12
-    vstore16(shuffle2(data1, data2, (uchar16)(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15)), 0, dst_plane1.ptr);
+    vstore16(shuffle2(data1, data2, (uchar16)(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15)), 0, dst_plane1.ptr);
 #elif defined(NV21)
-    vstore16(shuffle2(data2, data1, (uchar16)(0, 2, 4, 6, 8, 10, 12, 14, 1, 3, 5, 7, 9, 11, 13, 15)), 0, dst_plane1.ptr);
+    vstore16(shuffle2(data2, data1, (uchar16)(0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15)), 0, dst_plane1.ptr);
 #endif /* NV12 or NV21 */
 }
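
The mask fix here is easiest to see with a worked example. shuffle2() selects
from the concatenation of its two uchar8 inputs, so mask indices 0-7 pick from
the first vector and 8-15 from the second. For NV12 (data1 = U row, data2 = V row):

    // old mask (0,2,4,6,8,10,12,14,1,3,5,7,9,11,13,15):
    //   U0 U2 U4 U6 V0 V2 V4 V6 U1 U3 U5 U7 V1 V3 V5 V7   -- not interleaved
    // new mask (0,8,1,9,2,10,3,11,4,12,5,13,6,14,7,15):
    //   U0 V0 U1 V1 U2 V2 U3 V3 U4 V4 U5 V5 U6 V6 U7 V7   -- UVUV..., as the semi-planar output expects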
 
diff --git a/src/core/CL/kernels/CLChannelCombineKernel.cpp b/src/core/CL/kernels/CLChannelCombineKernel.cpp
index d729ebc..6e55e66 100644
--- a/src/core/CL/kernels/CLChannelCombineKernel.cpp
+++ b/src/core/CL/kernels/CLChannelCombineKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -48,41 +48,62 @@
 
 void CLChannelCombineKernel::configure(const ICLTensor *plane0, const ICLTensor *plane1, const ICLTensor *plane2, const ICLTensor *plane3, ICLTensor *output)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(plane0, plane1, plane2, output);
+    ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane0);
+    ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane1);
+    ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane2);
+    ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(output);
+
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane0, Format::U8);
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane1, Format::U8);
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane2, Format::U8);
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(output, Format::RGB888, Format::RGBA8888, Format::YUYV422, Format::UYVY422);
 
-    const Format fmt = output->info()->format();
-    _planes[0]       = plane0;
-    _planes[1]       = plane1;
-    _planes[2]       = plane2;
-    if(Format::RGBA8888 == fmt)
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane0, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane1, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane2, 1, DataType::U8);
+
+    const Format output_format = output->info()->format();
+
+    // Check that the horizontal dimension of the Y plane is even and validate the horizontal sub-sampling of the U and V planes
+    if(Format::YUYV422 == output_format || Format::UYVY422 == output_format)
     {
+        // Validate Y plane of input and output
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_EVEN(output_format, plane0, output);
+
+        // Validate U and V planes of the input
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), plane1, plane2);
+    }
+
+    _planes[0] = plane0;
+    _planes[1] = plane1;
+    _planes[2] = plane2;
+    _planes[3] = nullptr;
+
+    // Validate the last input tensor only for RGBA format
+    if(Format::RGBA8888 == output_format)
+    {
+        ARM_COMPUTE_ERROR_ON_NULLPTR(plane3);
+        ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane3);
+
         ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane3, Format::U8);
+        ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane3, 1, DataType::U8);
+
         _planes[3] = plane3;
     }
-    else
-    {
-        _planes[3] = nullptr;
-    }
+
     _output       = output;
     _output_multi = nullptr;
 
-    // Half the processed elements for U,V channels due to sub-sampling of 2
-    if(Format::YUYV422 == fmt || Format::UYVY422 == fmt)
+    // Half the processed elements for U and V channels due to horizontal sub-sampling of 2
+    if(Format::YUYV422 == output_format || Format::UYVY422 == output_format)
     {
-        _x_subsampling = { { 1, 2, 2 } };
-        _y_subsampling = { { 1, 2, 2 } };
-    }
-    else
-    {
-        _x_subsampling = { { 1, 1, 1 } };
-        _y_subsampling = { { 1, 1, 1 } };
+        _x_subsampling[1] = 2;
+        _x_subsampling[2] = 2;
     }
 
     // Create kernel
-    std::string kernel_name = "channel_combine_" + string_from_format(fmt);
+    std::string kernel_name = "channel_combine_" + string_from_format(output_format);
     _kernel                 = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name));
 
     // Configure window
@@ -112,50 +133,78 @@
 
 void CLChannelCombineKernel::configure(const ICLImage *plane0, const ICLImage *plane1, const ICLImage *plane2, ICLMultiImage *output)
 {
+    ARM_COMPUTE_ERROR_ON_NULLPTR(plane0, plane1, plane2, output);
     ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane0);
     ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane1);
     ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane2);
+
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane0, Format::U8);
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane1, Format::U8);
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane2, Format::U8);
     ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(output, Format::NV12, Format::NV21, Format::IYUV, Format::YUV444);
 
-    _planes[0]           = plane0;
-    _planes[1]           = plane1;
-    _planes[2]           = plane2;
-    _planes[3]           = nullptr;
-    _output              = nullptr;
-    _output_multi        = output;
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane0, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane1, 1, DataType::U8);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane2, 1, DataType::U8);
+
+    const Format output_format = output->info()->format();
+
+    // Check that the shape of the Y plane is even and that the U and V planes have the expected sub-sampled shapes.
+    // Perform this validation only for formats which require sub-sampling.
+    if(Format::YUV444 != output_format)
+    {
+        // Validate Y plane of input and output
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_EVEN(output_format, plane0, output->plane(0));
+
+        // Validate U and V plane of the input
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), plane1, plane2);
+
+        // Validate the second plane U (NV12 and NV21 have a combined UV88 plane while IYUV has only the U plane).
+        // MultiImage generates the correct tensor shape, but check anyway in case the plane shapes were changed to a wrong size.
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), output->plane(1));
+
+        // Validate the last plane V of format IYUV
+        if(Format::IYUV == output_format)
+        {
+            // Validate V plane of the output
+            ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), output->plane(2));
+        }
+    }
+
+    // Set input tensors
+    _planes[0] = plane0;
+    _planes[1] = plane1;
+    _planes[2] = plane2;
+    _planes[3] = nullptr;
+
+    // Set output tensor
+    _output       = nullptr;
+    _output_multi = output;
+
     bool has_two_planars = false;
 
     // Set sub-sampling parameters for each plane
-    const Format          fmt = output->info()->format();
     std::string           kernel_name;
     std::set<std::string> build_opts;
 
-    if(Format::NV12 == fmt || Format::NV21 == fmt)
+    if(Format::NV12 == output_format || Format::NV21 == output_format)
     {
         _x_subsampling = { { 1, 2, 2 } };
         _y_subsampling = { { 1, 2, 2 } };
         kernel_name    = "channel_combine_NV";
-        build_opts.emplace(Format::NV12 == fmt ? "-DNV12" : "-DNV21");
+        build_opts.emplace(Format::NV12 == output_format ? "-DNV12" : "-DNV21");
         has_two_planars = true;
     }
     else
     {
-        if(Format::IYUV == fmt)
+        if(Format::IYUV == output_format)
         {
             _x_subsampling = { { 1, 2, 2 } };
             _y_subsampling = { { 1, 2, 2 } };
         }
-        else
-        {
-            _x_subsampling = { { 1, 1, 1 } };
-            _y_subsampling = { { 1, 1, 1 } };
-        }
 
         kernel_name = "copy_planes_3p";
-        build_opts.emplace(Format::IYUV == fmt ? "-DIYUV" : "-DYUV444");
+        build_opts.emplace(Format::IYUV == output_format ? "-DIYUV" : "-DYUV444");
     }
 
     // Create kernel
@@ -166,12 +215,12 @@
 
     Window win = calculate_max_window(*plane0->info(), Steps(num_elems_processed_per_iteration));
 
-    AccessWindowHorizontal input_plane0_access(plane0->info(), 0, num_elems_processed_per_iteration);
-    AccessWindowRectangle  input_plane1_access(plane1->info(), 0, 0, num_elems_processed_per_iteration, 1, 1.f / _x_subsampling[1], 1.f / _y_subsampling[1]);
-    AccessWindowRectangle  input_plane2_access(plane2->info(), 0, 0, num_elems_processed_per_iteration, 1, 1.f / _x_subsampling[2], 1.f / _y_subsampling[2]);
-    AccessWindowRectangle  output_plane0_access(output->plane(0)->info(), 0, 0, num_elems_processed_per_iteration, 1, 1.f, 1.f / _y_subsampling[1]);
-    AccessWindowRectangle  output_plane1_access(output->plane(1)->info(), 0, 0, num_elems_processed_per_iteration, 1, 1.f / _x_subsampling[1], 1.f / _y_subsampling[1]);
-    AccessWindowRectangle  output_plane2_access(has_two_planars ? nullptr : output->plane(2)->info(), 0, 0, num_elems_processed_per_iteration, 1, 1.f / _x_subsampling[2], 1.f / _y_subsampling[2]);
+    AccessWindowRectangle input_plane0_access(plane0->info(), 0, 0, num_elems_processed_per_iteration, 1.f);
+    AccessWindowRectangle input_plane1_access(plane1->info(), 0, 0, num_elems_processed_per_iteration, 1.f, 1.f / _x_subsampling[1], 1.f / _y_subsampling[1]);
+    AccessWindowRectangle input_plane2_access(plane2->info(), 0, 0, num_elems_processed_per_iteration, 1.f, 1.f / _x_subsampling[2], 1.f / _y_subsampling[2]);
+    AccessWindowRectangle output_plane0_access(output->plane(0)->info(), 0, 0, num_elems_processed_per_iteration, 1.f, 1.f, 1.f / _y_subsampling[1]);
+    AccessWindowRectangle output_plane1_access(output->plane(1)->info(), 0, 0, num_elems_processed_per_iteration, 1.f, 1.f / _x_subsampling[1], 1.f / _y_subsampling[1]);
+    AccessWindowRectangle output_plane2_access(has_two_planars ? nullptr : output->plane(2)->info(), 0, 0, num_elems_processed_per_iteration, 1.f, 1.f / _x_subsampling[2], 1.f / _y_subsampling[2]);
 
     update_window_and_padding(win,
                               input_plane0_access, input_plane1_access, input_plane2_access,
@@ -192,6 +241,7 @@
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
 
     Window slice = window.first_slice_window_2D();
+    slice.set_dimension_step(Window::DimY, 1);
 
     do
     {
diff --git a/src/core/NEON/kernels/NEChannelCombineKernel.cpp b/src/core/NEON/kernels/NEChannelCombineKernel.cpp
index a2b24de..28fb4bd 100644
--- a/src/core/NEON/kernels/NEChannelCombineKernel.cpp
+++ b/src/core/NEON/kernels/NEChannelCombineKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -56,47 +56,58 @@
     ARM_COMPUTE_ERROR_ON(plane1 == output);
     ARM_COMPUTE_ERROR_ON(plane2 == output);
 
-    set_format_if_unknown(*plane0->info(), Format::U8);
-    set_format_if_unknown(*plane1->info(), Format::U8);
-    set_format_if_unknown(*plane2->info(), Format::U8);
-
-    if(plane3 != nullptr)
-    {
-        set_format_if_unknown(*plane3->info(), Format::U8);
-    }
-
-    set_shape_if_empty(*output->info(), plane0->info()->tensor_shape());
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane0, Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane1, Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane2, Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(output, Format::RGB888, Format::RGBA8888, Format::UYVY422, Format::YUYV422);
 
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane0, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane1, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane2, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(output, Format::RGB888, Format::RGBA8888, Format::UYVY422, Format::YUYV422);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(plane0, plane1, plane2);
 
-    if(plane3 != nullptr)
+    const Format output_format = output->info()->format();
+
+    // Check that the horizontal dimension of the Y plane is even and validate the horizontal sub-sampling of the U and V planes
+    if(Format::YUYV422 == output_format || Format::UYVY422 == output_format)
     {
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(plane0, plane3);
-        ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(plane0, plane3);
+        // Validate Y plane of input and output
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_EVEN(output_format, plane0, output);
+
+        // Validate U and V planes of the input
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), plane1, plane2);
     }
 
-    const Format &output_format = output->info()->format();
+    _planes[0] = plane0;
+    _planes[1] = plane1;
+    _planes[2] = plane2;
+    _planes[3] = nullptr;
 
-    if(output_format == Format::RGBA8888)
+    // Validate the last input tensor only for RGBA format
+    if(Format::RGBA8888 == output_format)
     {
-        ARM_COMPUTE_ERROR_ON(plane3 == output);
+        ARM_COMPUTE_ERROR_ON_NULLPTR(plane3);
+        ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane3);
+
+        ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane3, Format::U8);
         ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane3, 1, DataType::U8);
+
+        _planes[3] = plane3;
     }
 
-    _planes[0]    = plane0;
-    _planes[1]    = plane1;
-    _planes[2]    = plane2;
-    _planes[3]    = plane3;
     _output       = output;
     _output_multi = nullptr;
 
+    // Half the processed elements for U and V channels due to horizontal sub-sampling of 2
+    if(Format::YUYV422 == output_format || Format::UYVY422 == output_format)
+    {
+        _x_subsampling[1] = 2;
+        _x_subsampling[2] = 2;
+    }
+
     _num_elems_processed_per_iteration = 8;
     _is_parallelizable                 = true;
 
+    // Select function and number of elements to process given the output format
     switch(output_format)
     {
         case Format::RGB888:
@@ -106,14 +117,10 @@
             _func = &NEChannelCombineKernel::combine_4C;
             break;
         case Format::UYVY422:
-            _x_subsampling[1]                  = 2;
-            _x_subsampling[2]                  = 2;
             _num_elems_processed_per_iteration = 16;
             _func                              = &NEChannelCombineKernel::combine_YUV_1p<true>;
             break;
         case Format::YUYV422:
-            _x_subsampling[1]                  = 2;
-            _x_subsampling[2]                  = 2;
             _num_elems_processed_per_iteration = 16;
             _func                              = &NEChannelCombineKernel::combine_YUV_1p<false>;
             break;
@@ -122,14 +129,6 @@
             break;
     }
 
-    TensorShape subsampled_shape_plane1{ plane0->info()->tensor_shape() };
-    subsampled_shape_plane1.set(0, subsampled_shape_plane1[0] / _x_subsampling[1]);
-    TensorShape subsampled_shape_plane2{ plane0->info()->tensor_shape() };
-    subsampled_shape_plane2.set(0, subsampled_shape_plane2[0] / _x_subsampling[2]);
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(plane1->info()->tensor_shape(), subsampled_shape_plane1);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(plane2->info()->tensor_shape(), subsampled_shape_plane2);
-
     Window win = calculate_max_window(*plane0->info(), Steps(_num_elems_processed_per_iteration));
 
     AccessWindowHorizontal output_access(output->info(), 0, _num_elems_processed_per_iteration);
@@ -167,65 +166,52 @@
     ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane1);
     ARM_COMPUTE_ERROR_ON_TENSOR_NOT_2D(plane2);
 
-    set_format_if_unknown(*plane0->info(), Format::U8);
-    set_format_if_unknown(*plane1->info(), Format::U8);
-    set_format_if_unknown(*plane2->info(), Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane0, Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane1, Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(plane2, Format::U8);
+    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(output, Format::NV12, Format::NV21, Format::IYUV, Format::YUV444);
 
-    set_shape_if_empty(*output->plane(0)->info(), plane0->info()->tensor_shape());
-
-    switch(output->info()->format())
-    {
-        case Format::NV12:
-        case Format::NV21:
-        case Format::IYUV:
-        {
-            TensorShape subsampled_shape = plane0->info()->tensor_shape();
-            subsampled_shape.set(0, subsampled_shape[0] / 2);
-            subsampled_shape.set(1, subsampled_shape[1] / 2);
-
-            set_shape_if_empty(*output->plane(1)->info(), subsampled_shape);
-
-            ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->plane(1)->info()->tensor_shape(), subsampled_shape);
-
-            if(output->info()->format() == Format::IYUV)
-            {
-                set_shape_if_empty(*output->plane(2)->info(), subsampled_shape);
-
-                ARM_COMPUTE_ERROR_ON_MISMATCHING_DIMENSIONS(output->plane(2)->info()->tensor_shape(), subsampled_shape);
-            }
-            break;
-        }
-        case Format::YUV444:
-            set_shape_if_empty(*output->plane(1)->info(), plane0->info()->tensor_shape());
-            set_shape_if_empty(*output->plane(2)->info(), plane0->info()->tensor_shape());
-
-            ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(plane1, plane2, output->plane(1), output->plane(2));
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Unsupported format");
-    }
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(plane0, output->plane(0));
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane0, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane1, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(plane2, 1, DataType::U8);
-    ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(output, Format::NV12, Format::NV21, Format::IYUV, Format::YUV444);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(plane0, plane1, plane2);
 
-    _planes[0]                            = plane0;
-    _planes[1]                            = plane1;
-    _planes[2]                            = plane2;
-    _planes[3]                            = nullptr;
-    _output                               = nullptr;
-    _output_multi                         = output;
+    const Format output_format = output->info()->format();
+
+    // Check that the shape of the Y plane is even and that the U and V planes have the expected sub-sampled shapes.
+    // Perform this validation only for formats which require sub-sampling.
+    if(Format::YUV444 != output_format)
+    {
+        // Validate Y plane of input and output
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_EVEN(output_format, plane0, output->plane(0));
+
+        // Validate U and V plane of the input
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), plane1, plane2);
+
+        // Validate the second plane U (NV12 and NV21 have a combined UV88 plane while IYUV has only the U plane).
+        // MultiImage generates the correct tensor shape, but check anyway in case the plane shapes were changed to a wrong size.
+        ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), output->plane(1));
+
+        // Validate the last plane V of format IYUV
+        if(Format::IYUV == output_format)
+        {
+            // Validate V plane of the output
+            ARM_COMPUTE_ERROR_ON_TENSORS_NOT_SUBSAMPLED(output_format, plane0->info()->tensor_shape(), output->plane(2));
+        }
+    }
+
+    _planes[0]    = plane0;
+    _planes[1]    = plane1;
+    _planes[2]    = plane2;
+    _planes[3]    = nullptr;
+    _output       = nullptr;
+    _output_multi = output;
+
     bool         has_two_planes           = false;
     unsigned int num_elems_written_plane1 = 8;
 
     _num_elems_processed_per_iteration = 8;
     _is_parallelizable                 = true;
 
-    const Format &output_format = output->info()->format();
-
     switch(output_format)
     {
         case Format::NV12:
@@ -268,8 +254,7 @@
                               output_plane1_access,
                               output_plane2_access);
 
-    ValidRegion plane0_valid_region = plane0->info()->valid_region();
-
+    ValidRegion plane0_valid_region  = plane0->info()->valid_region();
     ValidRegion output_plane1_region = has_two_planes ? intersect_valid_regions(plane1->info()->valid_region(), plane2->info()->valid_region()) : plane2->info()->valid_region();
 
     output_plane0_access.set_valid_region(win, ValidRegion(plane0_valid_region.anchor, output->plane(0)->info()->tensor_shape()));
@@ -358,7 +343,7 @@
 {
     // Create sub-sampled uv window and init uv planes
     Window win_uv(win);
-    win_uv.set_dimension_step(0, win.x().step() / _x_subsampling[1]);
+    win_uv.set_dimension_step(Window::DimX, win.x().step() / _x_subsampling[1]);
     win_uv.validate();
 
     Iterator p0(_planes[0], win);
@@ -405,13 +390,13 @@
 
     // Update UV window
     Window uv_win(win);
-    uv_win.set(Window::DimX, Window::Dimension(uv_win.x().start() / _x_subsampling[1], uv_win.x().end() / _x_subsampling[1], _num_elems_processed_per_iteration));
+    uv_win.set(Window::DimX, Window::Dimension(uv_win.x().start() / _x_subsampling[1], uv_win.x().end() / _x_subsampling[1], uv_win.x().step() / _x_subsampling[1]));
     uv_win.set(Window::DimY, Window::Dimension(uv_win.y().start() / _y_subsampling[1], uv_win.y().end() / _y_subsampling[1], 1));
     uv_win.validate();
 
     // Update output win
     Window out_win(win);
-    out_win.set(Window::DimX, Window::Dimension(out_win.x().start(), out_win.x().end(), out_win.x().step() * 2));
+    out_win.set(Window::DimX, Window::Dimension(out_win.x().start(), out_win.x().end(), out_win.x().step() / _x_subsampling[1]));
     out_win.set(Window::DimY, Window::Dimension(out_win.y().start() / _y_subsampling[1], out_win.y().end() / _y_subsampling[1], 1));
     out_win.validate();
 
@@ -421,6 +406,9 @@
     Iterator  p2(_planes[2 - shift], uv_win);
     Iterator  out(_output_multi->plane(1), out_win);
 
+    // Increase the step size after the iterator is created so that the stride is calculated correctly for the multi-channel format
+    out_win.set_dimension_step(Window::DimX, out_win.x().step() * _x_subsampling[1]);
+
     execute_window_loop(out_win, [&](const Coordinates & id)
     {
         const uint8x8x2_t pixels =
@@ -450,19 +438,17 @@
 
     // Update window
     Window tmp_win(win);
-    tmp_win.set(Window::DimX, Window::Dimension(tmp_win.x().start() / _x_subsampling[plane_id], tmp_win.x().end() / _x_subsampling[plane_id], _num_elems_processed_per_iteration));
+    tmp_win.set(Window::DimX, Window::Dimension(tmp_win.x().start() / _x_subsampling[plane_id], tmp_win.x().end() / _x_subsampling[plane_id], tmp_win.x().step() / _x_subsampling[plane_id]));
     tmp_win.set(Window::DimY, Window::Dimension(tmp_win.y().start() / _y_subsampling[plane_id], tmp_win.y().end() / _y_subsampling[plane_id], 1));
-    tmp_win.validate();
 
     Iterator in(_planes[plane_id], tmp_win);
     Iterator out(_output_multi->plane(plane_id), tmp_win);
 
     execute_window_loop(tmp_win, [&](const Coordinates & id)
     {
-        const auto in_ptr  = static_cast<uint8_t *>(in.ptr());
-        const auto out_ptr = static_cast<uint8_t *>(out.ptr());
+        const uint8x8_t pixels = vld1_u8(in.ptr());
 
-        vst1_u8(out_ptr, vld1_u8(in_ptr));
+        vst1_u8(out.ptr(), pixels);
     },
     in, out);
 }
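
A note on the window arithmetic in combine_YUV_2p above, assuming a
sub-sampling factor of 2 and that an Iterator derives its per-iteration byte
stride from the window step at construction time (which is what the new
in-code comment relies on):

    // The U and V source windows advance step/2 sub-sampled pixels per iteration
    uv_win.set(Window::DimX, Window::Dimension(uv_win.x().start() / 2, uv_win.x().end() / 2, uv_win.x().step() / 2));
    // The interleaved UV output keeps its full-resolution bounds but is created with
    // step/2, so the Iterator's stride covers step/2 two-byte UV88 elements...
    out_win.set(Window::DimX, Window::Dimension(out_win.x().start(), out_win.x().end(), out_win.x().step() / 2));
    Iterator out(_output_multi->plane(1), out_win);
    // ...and only afterwards is the step scaled back up, so the loop advances in
    // full-resolution coordinates and performs the matching number of iterations.
    out_win.set_dimension_step(Window::DimX, out_win.x().step() * 2);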
diff --git a/tests/validation/CL/ChannelCombine.cpp b/tests/validation/CL/ChannelCombine.cpp
new file mode 100644
index 0000000..fd9049a
--- /dev/null
+++ b/tests/validation/CL/ChannelCombine.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLMultiImage.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLChannelCombine.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ConvertPolicyDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ChannelCombineFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+inline void validate_configuration(const TensorShape &shape, Format format)
+{
+    const int num_planes = num_planes_from_format(format);
+
+    // Create tensors
+    CLMultiImage          dst     = create_multi_image<CLMultiImage>(shape, format);
+    std::vector<CLTensor> ref_src = create_tensor_planes<CLTensor>(shape, format);
+
+    // Create and configure function
+    CLChannelCombine channel_combine;
+
+    if(num_planes == 1)
+    {
+        const CLTensor *tensor_extra = ((Format::RGBA8888 == format) ? &ref_src[3] : nullptr);
+
+        channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, dst.cl_plane(0));
+    }
+    else
+    {
+        channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
+    }
+
+    // TODO(bsgcomp): Add validation for padding and shape (COMPMID-659)
+}
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(ChannelCombine)
+
+template <typename T>
+using CLChannelCombineFixture = ChannelCombineValidationFixture<CLMultiImage, CLTensor, CLAccessor, CLChannelCombine, T>;
+
+TEST_SUITE(Configuration)
+DATA_TEST_CASE(RGBA, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })),
+               shape, format)
+{
+    validate_configuration(shape, format);
+}
+DATA_TEST_CASE(YUV, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })),
+               shape, format)
+{
+    validate_configuration(shape, format);
+}
+
+DATA_TEST_CASE(YUVPlanar, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::IYUV, Format::YUV444, Format::NV12, Format::NV21 })),
+               shape, format)
+{
+    validate_configuration(shape, format);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(RGBA)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUV)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUVPlanar)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(CLAccessor(*_target.cl_plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/ChannelCombine.cpp b/tests/validation/NEON/ChannelCombine.cpp
new file mode 100644
index 0000000..3dbc64d
--- /dev/null
+++ b/tests/validation/NEON/ChannelCombine.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/MultiImage.h"
+#include "arm_compute/runtime/NEON/functions/NEChannelCombine.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ConvertPolicyDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ChannelCombineFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+inline void validate_configuration(const TensorShape &shape, Format format)
+{
+    const int num_planes = num_planes_from_format(format);
+
+    // Create tensors
+    MultiImage          dst     = create_multi_image<MultiImage>(shape, format);
+    std::vector<Tensor> ref_src = create_tensor_planes<Tensor>(shape, format);
+
+    // Create and configure function
+    NEChannelCombine channel_combine;
+
+    if(num_planes == 1)
+    {
+        const Tensor *tensor_extra = Format::RGBA8888 == format ? &ref_src[3] : nullptr;
+
+        channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, dst.plane(0));
+    }
+    else
+    {
+        channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
+    }
+
+    // TODO(bsgcomp): Add validation for padding and shape (COMPMID-659)
+}
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(ChannelCombine)
+
+TEST_SUITE(Configuration)
+DATA_TEST_CASE(RGBA, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })),
+               shape, format)
+{
+    validate_configuration(shape, format);
+}
+DATA_TEST_CASE(YUV, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })),
+               shape, format)
+{
+    validate_configuration(shape, format);
+}
+
+DATA_TEST_CASE(YUVPlanar, framework::DatasetMode::ALL, combine(concat(datasets::Small2DShapes(), datasets::Large2DShapes()), framework::dataset::make("FormatType", { Format::IYUV, Format::YUV444, Format::NV12, Format::NV21 })),
+               shape, format)
+{
+    validate_configuration(shape, format);
+}
+TEST_SUITE_END()
+
+template <typename T>
+using NEChannelCombineFixture = ChannelCombineValidationFixture<MultiImage, Tensor, Accessor, NEChannelCombine, T>;
+
+TEST_SUITE(RGBA)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::RGB888, Format::RGBA8888 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUV)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::YUYV422, Format::UYVY422 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+TEST_SUITE_END()
+
+TEST_SUITE(YUVPlanar)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEChannelCombineFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("FormatType", { Format::NV12, Format::NV21, Format::IYUV, Format::YUV444 })))
+{
+    // Validate output
+    for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+    {
+        validate(Accessor(*_target.plane(plane_idx)), _reference[plane_idx]);
+    }
+}
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/ChannelCombineFixture.h b/tests/validation/fixtures/ChannelCombineFixture.h
new file mode 100644
index 0000000..68d0237
--- /dev/null
+++ b/tests/validation/fixtures/ChannelCombineFixture.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE
+#define ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/Helpers.h"
+#include "tests/validation/reference/ChannelCombine.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+template <typename TensorType>
+inline std::vector<TensorType> create_tensor_planes(const TensorShape &shape, Format format)
+{
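+    // adjust_odd_shape() rounds odd dimensions down to even values where the format requires sub-sampling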
+    TensorShape image_shape = adjust_odd_shape(shape, format);
+    TensorInfo  info(image_shape, Format::U8);
+
+    std::vector<TensorType> tensor_planes;
+
+    switch(format)
+    {
+        case Format::RGB888:
+        case Format::RGBA8888:
+        case Format::YUV444:
+        {
+            tensor_planes.resize(3);
+
+            if(format == Format::RGBA8888)
+            {
+                tensor_planes.resize(4);
+            }
+
+            for(unsigned int plane_idx = 0; plane_idx < tensor_planes.size(); ++plane_idx)
+            {
+                tensor_planes[plane_idx].allocator()->init(info);
+            }
+
+            break;
+        }
+        case Format::YUYV422:
+        case Format::UYVY422:
+        {
+            const TensorShape uv_shape = calculate_subsampled_shape(image_shape, format);
+            const TensorInfo  info_hor2(uv_shape, Format::U8);
+
+            tensor_planes.resize(3);
+
+            tensor_planes[0].allocator()->init(info);
+            tensor_planes[1].allocator()->init(info_hor2);
+            tensor_planes[2].allocator()->init(info_hor2);
+            break;
+        }
+        case Format::NV12:
+        case Format::NV21:
+        case Format::IYUV:
+        {
+            const TensorShape sub2_shape = calculate_subsampled_shape(image_shape, format);
+            const TensorInfo  info_sub2(sub2_shape, Format::U8);
+
+            tensor_planes.resize(3);
+
+            tensor_planes[0].allocator()->init(info);
+            tensor_planes[1].allocator()->init(info_sub2);
+            tensor_planes[2].allocator()->init(info_sub2);
+            break;
+        }
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+            break;
+    }
+
+    return tensor_planes;
+}
+} // namespace
+
+template <typename MultiImageType, typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ChannelCombineValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, Format format)
+    {
+        _num_planes = num_planes_from_format(format);
+        _target     = compute_target(shape, format);
+        _reference  = compute_reference(shape, format);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        library->fill_tensor_uniform(tensor, i);
+    }
+
+    template <typename U>
+    std::vector<SimpleTensor<U>> create_tensor_planes_reference(const TensorShape &shape, Format format)
+    {
+        std::vector<SimpleTensor<U>> tensor_planes;
+
+        TensorShape image_shape = adjust_odd_shape(shape, format);
+
+        switch(format)
+        {
+            case Format::RGB888:
+            case Format::RGBA8888:
+            case Format::YUV444:
+            {
+                if(format == Format::RGBA8888)
+                {
+                    tensor_planes.emplace_back(image_shape, Format::U8);
+                }
+
+                tensor_planes.emplace_back(image_shape, Format::U8);
+                tensor_planes.emplace_back(image_shape, Format::U8);
+                tensor_planes.emplace_back(image_shape, Format::U8);
+                break;
+            }
+            case Format::YUYV422:
+            case Format::UYVY422:
+            {
+                const TensorShape hor2_shape = calculate_subsampled_shape(image_shape, format);
+
+                tensor_planes.emplace_back(image_shape, Format::U8);
+                tensor_planes.emplace_back(hor2_shape, Format::U8);
+                tensor_planes.emplace_back(hor2_shape, Format::U8);
+                break;
+            }
+            case Format::NV12:
+            case Format::NV21:
+            case Format::IYUV:
+            {
+                const TensorShape shape_sub2 = calculate_subsampled_shape(image_shape, format);
+
+                tensor_planes.emplace_back(image_shape, Format::U8);
+                tensor_planes.emplace_back(shape_sub2, Format::U8);
+                tensor_planes.emplace_back(shape_sub2, Format::U8);
+                break;
+            }
+            default:
+                ARM_COMPUTE_ERROR("Not supported");
+                break;
+        }
+
+        return tensor_planes;
+    }
+
+    MultiImageType compute_target(const TensorShape &shape, Format format)
+    {
+        // Create tensors
+        std::vector<TensorType> ref_src = create_tensor_planes<TensorType>(shape, format);
+        MultiImageType          dst     = create_multi_image<MultiImageType>(shape, format);
+
+        // Create and configure function
+        FunctionType channel_combine;
+
+        if(1 == _num_planes)
+        {
+            const TensorType *tensor_extra = ((Format::RGBA8888 == format) ? &ref_src[3] : nullptr);
+            TensorType       *tensor_dst   = dynamic_cast<TensorType *>(dst.plane(0));
+
+            channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], tensor_extra, tensor_dst);
+        }
+        else
+        {
+            channel_combine.configure(&ref_src[0], &ref_src[1], &ref_src[2], &dst);
+        }
+
+        for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+        {
+            const TensorType *dst_plane = static_cast<const TensorType *>(dst.plane(plane_idx));
+
+            ARM_COMPUTE_EXPECT(dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+        {
+            ARM_COMPUTE_EXPECT(ref_src[plane_idx].info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        // Allocate tensors
+        dst.allocate();
+
+        for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+        {
+            ref_src[plane_idx].allocator()->allocate();
+        }
+
+        for(unsigned int plane_idx = 0; plane_idx < _num_planes; ++plane_idx)
+        {
+            const TensorType *dst_plane = static_cast<const TensorType *>(dst.plane(plane_idx));
+
+            ARM_COMPUTE_EXPECT(!dst_plane->info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+        {
+            ARM_COMPUTE_EXPECT(!ref_src[plane_idx].info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        // Fill tensor planes
+        for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+        {
+            fill(AccessorType(ref_src[plane_idx]), plane_idx);
+        }
+
+        // Compute function
+        channel_combine.run();
+
+        return dst;
+    }
+
+    std::vector<SimpleTensor<T>> compute_reference(const TensorShape &shape, Format format)
+    {
+        // Create reference
+        std::vector<SimpleTensor<T>> ref_src = create_tensor_planes_reference<T>(shape, format);
+
+        // Fill references
+        for(unsigned int plane_idx = 0; plane_idx < ref_src.size(); ++plane_idx)
+        {
+            fill(ref_src[plane_idx], plane_idx);
+        }
+
+        return reference::channel_combine<T>(shape, ref_src, format);
+    }
+
+    unsigned int                 _num_planes{};
+    MultiImageType               _target{};
+    std::vector<SimpleTensor<T>> _reference{};
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_CHANNEL_COMBINE_FIXTURE */
diff --git a/tests/validation/reference/ChannelCombine.cpp b/tests/validation/reference/ChannelCombine.cpp
new file mode 100644
index 0000000..c1ec3ec
--- /dev/null
+++ b/tests/validation/reference/ChannelCombine.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "ChannelCombine.h"
+
+#include "arm_compute/core/Types.h"
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+namespace
+{
+template <typename T>
+inline std::vector<SimpleTensor<T>> create_image_planes(const TensorShape &shape, Format format)
+{
+    TensorShape image_shape = adjust_odd_shape(shape, format);
+
+    std::vector<SimpleTensor<T>> image_planes;
+
+    switch(format)
+    {
+        case Format::RGB888:
+        case Format::RGBA8888:
+        case Format::YUYV422:
+        case Format::UYVY422:
+        {
+            image_planes.emplace_back(image_shape, format);
+            break;
+        }
+        case Format::NV12:
+        case Format::NV21:
+        {
+            TensorShape shape_uv88 = calculate_subsampled_shape(image_shape, Format::UV88);
+
+            image_planes.emplace_back(image_shape, Format::U8);
+            image_planes.emplace_back(shape_uv88, Format::UV88);
+            break;
+        }
+        case Format::IYUV:
+        {
+            TensorShape shape_sub2 = calculate_subsampled_shape(image_shape, Format::IYUV);
+
+            image_planes.emplace_back(image_shape, Format::U8);
+            image_planes.emplace_back(shape_sub2, Format::U8);
+            image_planes.emplace_back(shape_sub2, Format::U8);
+            break;
+        }
+        case Format::YUV444:
+        {
+            image_planes.emplace_back(image_shape, Format::U8);
+            image_planes.emplace_back(image_shape, Format::U8);
+            image_planes.emplace_back(image_shape, Format::U8);
+            break;
+        }
+        default:
+            ARM_COMPUTE_ERROR("Not supported");
+            break;
+    }
+
+    return image_planes;
+}
+} // namespace
+
+template <typename T>
+std::vector<SimpleTensor<T>> channel_combine(const TensorShape &shape, const std::vector<SimpleTensor<T>> &image_planes, Format format)
+{
+    std::vector<SimpleTensor<T>> dst = create_image_planes<T>(shape, format);
+
+    for(unsigned int plane_idx = 0; plane_idx < dst.size(); ++plane_idx)
+    {
+        SimpleTensor<T> &dst_tensor = dst[plane_idx];
+
+        for(int element_idx = 0; element_idx < dst_tensor.num_elements(); ++element_idx)
+        {
+            Coordinates coord = index2coord(dst_tensor.shape(), element_idx);
+
+            switch(format)
+            {
+                case Format::RGB888:
+                case Format::RGBA8888:
+                {
+                    // Copy R/G/B or A channel
+                    for(int channel_idx = 0; channel_idx < dst_tensor.num_channels(); ++channel_idx)
+                    {
+                        const T &src_value = reinterpret_cast<const T *>(image_planes[channel_idx](coord))[0];
+                        T       &dst_value = reinterpret_cast<T *>(dst_tensor(coord))[channel_idx];
+
+                        dst_value = src_value;
+                    }
+                    break;
+                }
+                case Format::YUYV422:
+                case Format::UYVY422:
+                {
+                    // Find coordinates of the sub-sampled pixel
+                    const Coordinates coord_hori(coord.x() / 2, coord.y());
+
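+                    // Each sub-sampled U/V value covers two consecutive Y pixels, so this case
+                    // writes two output pixels per iteration and advances element_idx below to
+                    // skip the second pixel in the outer loop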
+                    const T &src0 = reinterpret_cast<const T *>(image_planes[0](coord))[0];
+                    const T &src1 = reinterpret_cast<const T *>(image_planes[1](coord_hori))[0];
+
+                    const int shift = (Format::YUYV422 == format) ? 1 : 0;
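+                    // YUYV422 stores each pair as (Y, chroma) while UYVY422 stores (chroma, Y); the shift selects the channel order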
+                    T        &dst0  = reinterpret_cast<T *>(dst_tensor(coord))[1 - shift];
+                    T        &dst1  = reinterpret_cast<T *>(dst_tensor(coord))[0 + shift];
+
+                    dst0 = src0;
+                    dst1 = src1;
+
+                    Coordinates coord2 = index2coord(dst_tensor.shape(), ++element_idx);
+
+                    const T &src2 = reinterpret_cast<const T *>(image_planes[0](coord2))[0];
+                    const T &src3 = reinterpret_cast<const T *>(image_planes[2](coord_hori))[0];
+
+                    T &dst2 = reinterpret_cast<T *>(dst_tensor(coord2))[1 - shift];
+                    T &dst3 = reinterpret_cast<T *>(dst_tensor(coord2))[0 + shift];
+
+                    dst2 = src2;
+                    dst3 = src3;
+
+                    break;
+                }
+                case Format::NV12:
+                case Format::NV21:
+                {
+                    if(0U == plane_idx)
+                    {
+                        // Copy the Y channel from source plane0 into plane0 of the destination multi-image
+                        dst_tensor[element_idx] = image_planes[0][element_idx];
+                    }
+                    else
+                    {
+                        const int shift = (Format::NV12 == format) ? 0 : 1;
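+                        // NV12 interleaves chroma as (U, V) and NV21 as (V, U); the shift selects the channel order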
+
+                        // Get U channel from plane1 and V channel from plane2 of the source
+                        const T &src_u0 = reinterpret_cast<const T *>(image_planes[1](coord))[0];
+                        const T &src_v0 = reinterpret_cast<const T *>(image_planes[2](coord))[0];
+
+                        // Get U and V channels from plane1 of the destination multi-image
+                        T &dst_u0 = reinterpret_cast<T *>(dst_tensor(coord))[0 + shift];
+                        T &dst_v0 = reinterpret_cast<T *>(dst_tensor(coord))[1 - shift];
+
+                        // Combine channel U and V
+                        dst_u0 = src_u0;
+                        dst_v0 = src_v0;
+                    }
+
+                    break;
+                }
+                case Format::IYUV:
+                case Format::YUV444:
+                {
+                    // Get Y/U/V element
+                    const T &src = reinterpret_cast<const T *>(image_planes[plane_idx](coord))[0];
+                    T       &dst = reinterpret_cast<T *>(dst_tensor(coord))[0];
+
+                    // Copy Y/U/V plane
+                    dst = src;
+
+                    break;
+                }
+                default:
+                    ARM_COMPUTE_ERROR("Not supported");
+                    break;
+            }
+        }
+    }
+
+    return dst;
+}
+
+template std::vector<SimpleTensor<uint8_t>> channel_combine(const TensorShape &shape, const std::vector<SimpleTensor<uint8_t>> &image_planes, Format format);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/reference/ChannelCombine.h b/tests/validation/reference/ChannelCombine.h
new file mode 100644
index 0000000..cc6607d
--- /dev/null
+++ b/tests/validation/reference/ChannelCombine.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017-2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_CHANNEL_COMBINE_H__
+#define __ARM_COMPUTE_TEST_CHANNEL_COMBINE_H__
+
+#include "tests/SimpleTensor.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+std::vector<SimpleTensor<T>> channel_combine(const TensorShape &shape, const std::vector<SimpleTensor<T>> &image_planes, Format format);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_CHANNEL_COMBINE_H__ */