COMPMID-415: Move DepthConcatenateLayer to new validation

Change-Id: I3e594d5800f563ba9af3195b7db2b6d3e32012dd
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/81340
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/tests/validation_new/CL/DepthConcatenateLayer.cpp b/tests/validation_new/CL/DepthConcatenateLayer.cpp
new file mode 100644
index 0000000..ff64e23
--- /dev/null
+++ b/tests/validation_new/CL/DepthConcatenateLayer.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLDepthConcatenate.h"
+#include "framework/Asserts.h"
+#include "framework/Macros.h"
+#include "framework/datasets/Datasets.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets_new/ShapeDatasets.h"
+#include "tests/validation_new/Validation.h"
+#include "tests/validation_new/fixtures/DepthConcatenateLayerFixture.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(DepthConcatenateLayer)
+
+//TODO(COMPMID-415): Add configuration test?
+
+template <typename T>
+using CLDepthConcatenateLayerFixture = DepthConcatenateValidationFixture<CLTensor, CLAccessor, CLDepthConcatenate, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<half_float::half>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+                       DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
+                       DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+                                                                                                                   DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
+                                                                                                                 DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(),
+                                                                                                                    framework::dataset::make("DataType",
+                                                                                                                            DataType::QS8)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(),
+                                                                                                                  framework::dataset::make("DataType",
+                                                                                                                          DataType::QS8)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(),
+                                                                                                                     framework::dataset::make("DataType",
+                                                                                                                             DataType::QS16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(),
+                                                                                                                   framework::dataset::make("DataType",
+                                                                                                                           DataType::QS16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/CPP/DepthConcatenateLayer.cpp b/tests/validation_new/CPP/DepthConcatenateLayer.cpp
new file mode 100644
index 0000000..c54c6c8
--- /dev/null
+++ b/tests/validation_new/CPP/DepthConcatenateLayer.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "DepthConcatenateLayer.h"
+
+#include "tests/validation_new/FixedPoint.h"
+#include "tests/validation_new/Helpers.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs)
+{
+    // Create reference
+    std::vector<TensorShape> shapes;
+
+    for(const auto &src : srcs)
+    {
+        shapes.emplace_back(src.shape());
+    }
+
+    DataType        dst_type  = srcs.empty() ? DataType::UNKNOWN : srcs[0].data_type();
+    TensorShape     dst_shape = calculate_depth_concatenate_shape(shapes);
+    SimpleTensor<T> dst(dst_shape, dst_type);
+
+    // Compute reference
+    int       depth_offset = 0;
+    const int width_out    = dst.shape().x();
+    const int height_out   = dst.shape().y();
+    const int depth_out    = dst.shape().z();
+    const int out_stride_z = width_out * height_out;
+    const int batches      = dst.shape().total_size_upper(3);
+
+    // Zero-fill the output so that regions not covered by any input (the padding around smaller inputs) stay zero
+    std::fill_n(dst.data(), dst.num_elements(), 0);
+
+    for(const auto &src : srcs)
+    {
+        ARM_COMPUTE_ERROR_ON(depth_offset >= depth_out);
+        ARM_COMPUTE_ERROR_ON(batches != static_cast<int>(src.shape().total_size_upper(3)));
+
+        const int width  = src.shape().x();
+        const int height = src.shape().y();
+        const int depth  = src.shape().z();
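+        // Inputs smaller than the output in x/y are centred; x_diff and y_diff
+        // are the left/top offsets of this input inside the output plane.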
+        const int x_diff = (width_out - width) / 2;
+        const int y_diff = (height_out - height) / 2;
+
+        const T *src_ptr = src.data();
+
+        for(int b = 0; b < batches; ++b)
+        {
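+            // Start of this input inside the output: batch offset, plus the depth
+            // slice occupied by previous inputs, plus the centring offsets.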
+            const size_t offset_to_first_element = b * out_stride_z * depth_out + depth_offset * out_stride_z + y_diff * width_out + x_diff;
+
+            for(int d = 0; d < depth; ++d)
+            {
+                for(int r = 0; r < height; ++r)
+                {
+                    std::copy(src_ptr, src_ptr + width, dst.data() + offset_to_first_element + d * out_stride_z + r * width_out);
+                    src_ptr += width;
+                }
+            }
+        }
+
+        depth_offset += depth;
+    }
+
+    return dst;
+}
+
+template SimpleTensor<float> depthconcatenate_layer(const std::vector<SimpleTensor<float>> &srcs);
+template SimpleTensor<half_float::half> depthconcatenate_layer(const std::vector<SimpleTensor<half_float::half>> &srcs);
+template SimpleTensor<qint8_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint8_t>> &srcs);
+template SimpleTensor<qint16_t> depthconcatenate_layer(const std::vector<SimpleTensor<qint16_t>> &srcs);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/CPP/DepthConcatenateLayer.h b/tests/validation_new/CPP/DepthConcatenateLayer.h
new file mode 100644
index 0000000..e7467d8
--- /dev/null
+++ b/tests/validation_new/CPP/DepthConcatenateLayer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_H__
+#define __ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_H__
+
+#include "tests/validation_new/SimpleTensor.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T>
+SimpleTensor<T> depthconcatenate_layer(const std::vector<SimpleTensor<T>> &srcs);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_H__ */
diff --git a/tests/validation_new/Helpers.cpp b/tests/validation_new/Helpers.cpp
new file mode 100644
index 0000000..c65966b
--- /dev/null
+++ b/tests/validation_new/Helpers.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "tests/validation_new/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TensorShape calculate_depth_concatenate_shape(const std::vector<TensorShape> &input_shapes)
+{
+    ARM_COMPUTE_ERROR_ON(input_shapes.empty());
+
+    TensorShape out_shape = input_shapes[0];
+
+    size_t max_x = 0;
+    size_t max_y = 0;
+    size_t depth = 0;
+
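+    // Output width/height are the maxima over all inputs; output depth is the sum of the input depths.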
+    for(const auto &shape : input_shapes)
+    {
+        max_x = std::max(shape.x(), max_x);
+        max_y = std::max(shape.y(), max_y);
+        depth += shape.z();
+    }
+
+    out_shape.set(0, max_x);
+    out_shape.set(1, max_y);
+    out_shape.set(2, depth);
+
+    return out_shape;
+}
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/Helpers.h b/tests/validation_new/Helpers.h
index 3058b8e..3095916 100644
--- a/tests/validation_new/Helpers.h
+++ b/tests/validation_new/Helpers.h
@@ -128,6 +128,14 @@
 
     return bounds;
 }
+
+/** Calculate the output tensor shape given a vector of input tensor shapes to concatenate
+ *
+ * @param[in] input_shapes Shapes of the tensors to concatenate across depth.
+ *
+ * @return The shape of the concatenated output tensor.
+ */
+TensorShape calculate_depth_concatenate_shape(const std::vector<TensorShape> &input_shapes);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation_new/NEON/DepthConcatenateLayer.cpp b/tests/validation_new/NEON/DepthConcatenateLayer.cpp
new file mode 100644
index 0000000..d6400d2
--- /dev/null
+++ b/tests/validation_new/NEON/DepthConcatenateLayer.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEDepthConcatenate.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "framework/Asserts.h"
+#include "framework/Macros.h"
+#include "framework/datasets/Datasets.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/datasets_new/ShapeDatasets.h"
+#include "tests/validation_new/Validation.h"
+#include "tests/validation_new/fixtures/DepthConcatenateLayerFixture.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(NEON)
+TEST_SUITE(DepthConcatenateLayer)
+
+//TODO(COMPMID-415): Add configuration test?
+
+template <typename T>
+using NEDepthConcatenateLayerFixture = DepthConcatenateValidationFixture<Tensor, Accessor, NEDepthConcatenate, T>;
+
+TEST_SUITE(Float)
+#ifdef ARM_COMPUTE_ENABLE_FP16
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<half_float::half>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+                       DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
+                       DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(), framework::dataset::make("DataType",
+                                                                                                                   DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(), framework::dataset::make("DataType",
+                                                                                                                 DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(),
+                                                                                                                    framework::dataset::make("DataType",
+                                                                                                                            DataType::QS8)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(),
+                                                                                                                  framework::dataset::make("DataType",
+                                                                                                                          DataType::QS8)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::Small2DShapes(),
+                                                                                                                     framework::dataset::make("DataType",
+                                                                                                                             DataType::QS16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEDepthConcatenateLayerFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::Large2DShapes(),
+                                                                                                                   framework::dataset::make("DataType",
+                                                                                                                           DataType::QS16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/fixtures/DepthConcatenateLayerFixture.h b/tests/validation_new/fixtures/DepthConcatenateLayerFixture.h
new file mode 100644
index 0000000..601758f
--- /dev/null
+++ b/tests/validation_new/fixtures/DepthConcatenateLayerFixture.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "framework/Asserts.h"
+#include "framework/Fixture.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation_new/CPP/DepthConcatenateLayer.h"
+#include "tests/validation_new/Helpers.h"
+
+#include <random>
+
+namespace arm_compute
+{
+class ITensor;
+class Tensor;
+class ICLTensor;
+class CLTensor;
+
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class DepthConcatenateValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type)
+    {
+        // Create input shapes
+        std::mt19937                    gen(library->seed());
+        std::uniform_int_distribution<> num_dis(2, 6);
+        const int                       num_tensors = num_dis(gen);
+
+        std::vector<TensorShape>         shapes(num_tensors, shape);
+        std::uniform_int_distribution<>  depth_dis(1, 7);
+        std::bernoulli_distribution      mutate_dis(0.25f);
+        std::uniform_real_distribution<> change_dis(-0.25f, 0.f);
+
+        // Derive each input shape by randomly mutating a copy of the given shape
+        for(auto &s : shapes)
+        {
+            // Set the depth of the tensor
+            s.set(2, depth_dis(gen));
+
+            // Randomly change the first dimension
+            if(mutate_dis(gen))
+            {
+                // Decrease the dimension by a small percentage. Don't increase
+                // it, as that could make the tensor too large. The change must
+                // also be an even number, otherwise the depth concatenate fails.
+                s.set(0, s[0] + 2 * static_cast<int>(s[0] * change_dis(gen)));
+            }
+
+            // Repeat the same as above for the second dimension
+            if(mutate_dis(gen))
+            {
+                s.set(1, s[1] + 2 * static_cast<int>(s[1] * change_dis(gen)));
+            }
+        }
+
+        _target    = compute_target(shapes, data_type);
+        _reference = compute_reference(shapes, data_type);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i)
+    {
+        library->fill_tensor_uniform(tensor, i);
+    }
+
+    TensorType compute_target(std::vector<TensorShape> shapes, DataType data_type)
+    {
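+        // The depth concatenate functions take a vector of tensor interface pointers,
+        // so select ITensor or ICLTensor to match the backend tensor type.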
+        using ITensorType = typename std::conditional<std::is_same<TensorType, Tensor>::value, ITensor, ICLTensor>::type;
+
+        std::vector<TensorType>    srcs;
+        std::vector<ITensorType *> src_ptrs;
+
+        // Create tensors
+        srcs.reserve(shapes.size());
+
+        for(const auto &shape : shapes)
+        {
+            srcs.emplace_back(create_tensor<TensorType>(shape, data_type, 1, _fractional_bits));
+            src_ptrs.emplace_back(&srcs.back());
+        }
+
+        TensorShape dst_shape = calculate_depth_concatenate_shape(shapes);
+        TensorType  dst       = create_tensor<TensorType>(dst_shape, data_type, 1, _fractional_bits);
+
+        // Create and configure function
+        FunctionType depth_concat;
+        depth_concat.configure(src_ptrs, &dst);
+
+        for(auto &src : srcs)
+        {
+            ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        for(auto &src : srcs)
+        {
+            src.allocator()->allocate();
+            ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        }
+
+        dst.allocator()->allocate();
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        int i = 0;
+        for(auto &src : srcs)
+        {
+            fill(AccessorType(src), i++);
+        }
+
+        // Compute function
+        depth_concat.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(std::vector<TensorShape> shapes, DataType data_type)
+    {
+        std::vector<SimpleTensor<T>> srcs;
+
+        // Create and fill tensors
+        int i = 0;
+        for(const auto &shape : shapes)
+        {
+            srcs.emplace_back(shape, data_type, 1, _fractional_bits);
+            fill(srcs.back(), i++);
+        }
+
+        return reference::depthconcatenate_layer<T>(srcs);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+
+private:
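+    // Fixed-point position used when creating QS8/QS16 tensors; ignored for floating-point types.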
+    int _fractional_bits{ 1 };
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_DEPTHCONCATENATE_LAYER_FIXTURE */