COMPMID-415: Move SoftmaxLayer to new validation

Change-Id: I68bb359021256e67892e4fc00d436f9027a3bd07
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/80942
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/tests/datasets_new/ShapeDatasets.h b/tests/datasets_new/ShapeDatasets.h
new file mode 100644
index 0000000..690e401
--- /dev/null
+++ b/tests/datasets_new/ShapeDatasets.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_SHAPE_DATASETS_H__
+#define __ARM_COMPUTE_TEST_SHAPE_DATASETS_H__
+
+#include "arm_compute/core/TensorShape.h"
+#include "framework/datasets/Datasets.h"
+
+#include <type_traits>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+/** Data set containing one 1D tensor shape. */
+class Small1DShape final : public framework::dataset::SingletonDataset<TensorShape>
+{
+public:
+    Small1DShape()
+        : SingletonDataset("Shape", TensorShape{ 256U })
+    {
+    }
+};
+
+/** Parent type for all shape datasets. */
+using ShapeDataset = framework::dataset::ContainerDataset<std::vector<TensorShape>>;
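+// Note: shape datasets are typically combined with other datasets (e.g. data types) via the framework's combine()/concat() helpers in the test cases.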
+
+/** Data set containing two small 2D tensor shapes. */
+class Small2DShapes final : public ShapeDataset
+{
+public:
+    Small2DShapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 5U, 5U },
+                     TensorShape{ 640U, 480U }
+    })
+    {
+    }
+};
+
+/** Data set containing small tensor shapes. */
+class SmallShapes final : public ShapeDataset
+{
+public:
+    SmallShapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 7U, 7U },
+                     TensorShape{ 27U, 13U, 2U },
+                     TensorShape{ 128U, 64U, 1U, 3U }
+    })
+    {
+    }
+};
+
+/** Data set containing large tensor shapes. */
+class LargeShapes final : public ShapeDataset
+{
+public:
+    LargeShapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 1920U, 1080U },
+                     TensorShape{ 1245U, 652U, 1U, 3U },
+                     TensorShape{ 4160U, 3120U }
+    })
+    {
+    }
+};
+
+/** Data set containing two large 2D tensor shapes. */
+class Large2DShapes final : public ShapeDataset
+{
+public:
+    Large2DShapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 1920U, 1080U },
+                     TensorShape{ 4160U, 3120U }
+    })
+    {
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_SHAPE_DATASETS_H__ */
diff --git a/tests/validation/CL/SoftmaxLayer.cpp b/tests/validation/CL/SoftmaxLayer.cpp
deleted file mode 100644
index 5e03785..0000000
--- a/tests/validation/CL/SoftmaxLayer.cpp
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "AssetsLibrary.h"
-#include "CL/CLAccessor.h"
-#include "Globals.h"
-#include "PaddingCalculator.h"
-#include "TypePrinter.h"
-#include "Utils.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/CLTensor.h"
-#include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
-
-#include "boost_wrapper.h"
-
-#include <random>
-#include <string>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance      = 0.000001f; /** Tolerance for float operations */
-const float tolerance_qs8  = 2.f;       /** Tolerance for QS8 fixed point operations */
-const float tolerance_qs16 = 2.f;       /** Tolerance for QS16 fixed point operations */
-
-/** Compute OpenCL softmax layer function.
- *
- * @param[in] shape                Shape of the input and output tensors.
- * @param[in] dt                   Shape Data type of tensors.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of fixed point numbers.
- *
- * @return Computed output tensor.
- */
-CLTensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0)
-{
-    // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-
-    // Create and configure function
-    CLSoftmaxLayer smx_layer;
-    smx_layer.configure(&src, &dst);
-
-    // Allocate tensors
-    src.allocator()->allocate();
-    dst.allocator()->allocate();
-
-    BOOST_TEST(!src.info()->is_resizable());
-    BOOST_TEST(!dst.info()->is_resizable());
-
-    // Fill tensors
-    if(arm_compute::is_data_type_float(dt))
-    {
-        std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
-        library->fill(CLAccessor(src), distribution, 0);
-    }
-    else
-    {
-        int                             one_fixed = 1 << fixed_point_position;
-        std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-        library->fill(CLAccessor(src), distribution, 0);
-    }
-
-    // Compute function
-    smx_layer.run();
-
-    return dst;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(CL)
-BOOST_AUTO_TEST_SUITE(SoftmaxLayer)
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt)
-{
-    // Set fixed point position data type allowed
-    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
-    // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape, dt, 1, fixed_point_position);
-
-    BOOST_TEST(src.info()->is_resizable());
-    BOOST_TEST(dst.info()->is_resizable());
-
-    // Create and configure function
-    CLSoftmaxLayer smx_layer;
-    smx_layer.configure(&src, &dst);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(src.info()->valid_region(), valid_region);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt)
-{
-    // Compute function
-    CLTensor dst = compute_softmax_layer(shape, dt);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt)
-{
-    // Compute function
-    CLTensor dst = compute_softmax_layer(shape, dt);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 6),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs8);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 6),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs8);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 14),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs16);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 14),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs16);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/NEON/SoftmaxLayer.cpp b/tests/validation/NEON/SoftmaxLayer.cpp
deleted file mode 100644
index 8422ba3..0000000
--- a/tests/validation/NEON/SoftmaxLayer.cpp
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "AssetsLibrary.h"
-#include "Globals.h"
-#include "NEON/Accessor.h"
-#include "PaddingCalculator.h"
-#include "TypePrinter.h"
-#include "Utils.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-
-#include "boost_wrapper.h"
-
-#include <random>
-#include <string>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-/** Tolerance for float operations */
-const float tolerance_f32 = 0.000001f;
-#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.0001f;
-#endif /* ARM_COMPUTE_ENABLE_FP16*/
-/** Tolerance for fixed point operations */
-const float tolerance_fixed_point = 2.f;
-
-/** Compute Neon softmax layer function.
- *
- * @param[in] shape                Shape of the input and output tensors.
- * @param[in] dt                   Shape Data type of tensors.
- * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of fixed point numbers.
- *
- * @return Computed output tensor.
- */
-Tensor compute_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-
-    // Create and configure function
-    NESoftmaxLayer smx_layer;
-    smx_layer.configure(&src, &dst);
-
-    // Allocate tensors
-    src.allocator()->allocate();
-    dst.allocator()->allocate();
-
-    BOOST_TEST(!src.info()->is_resizable());
-    BOOST_TEST(!dst.info()->is_resizable());
-
-    // Fill tensors
-    if(arm_compute::is_data_type_float(dt))
-    {
-        std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
-        library->fill(Accessor(src), distribution, 0);
-    }
-    else
-    {
-        int                             one_fixed = 1 << fixed_point_position;
-        std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-        library->fill(Accessor(src), distribution, 0);
-    }
-
-    // Compute function
-    smx_layer.run();
-
-    return dst;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(NEON)
-BOOST_AUTO_TEST_SUITE(SoftmaxLayer)
-
-#ifdef ARM_COMPUTE_ENABLE_FP16
-BOOST_AUTO_TEST_SUITE(Float16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes(), shape)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, DataType::F16);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::F16);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f16);
-}
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* ARM_COMPUTE_ENABLE_FP16*/
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, (SmallShapes() + LargeShapes()) * CNNDataTypes(), shape, dt)
-{
-    // Set fixed point position data type allowed
-    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape, dt, 1, fixed_point_position);
-
-    BOOST_TEST(src.info()->is_resizable());
-    BOOST_TEST(dst.info()->is_resizable());
-
-    // Create and configure function
-    NESoftmaxLayer smx_layer;
-    smx_layer.configure(&src, &dst);
-
-    // Validate valid region
-    const ValidRegion valid_region = shape_to_valid_region(shape);
-    validate(src.info()->valid_region(), valid_region);
-    validate(dst.info()->valid_region(), valid_region);
-
-    // Validate padding
-    const int         step    = 16 / arm_compute::data_size_from_type(dt);
-    const PaddingSize padding = PaddingCalculator(shape.x(), step).required_padding();
-    validate(src.info()->padding(), padding);
-    validate(dst.info()->padding(), padding);
-}
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes(), shape, dt)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, dt);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f32);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * CNNFloatDataTypes(), shape, dt)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, dt);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, dt);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f32);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 6),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 6),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS8, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * boost::unit_test::data::xrange(1, 14),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(RunLarge, LargeShapes() * boost::unit_test::data::xrange(1, 14),
-                     shape, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_softmax_layer(shape, DataType::QS16, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_fixed_point);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index b94a0e5..5c66990 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -738,31 +738,6 @@
     return ref_dst;
 }
 
-RawTensor Reference::compute_reference_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position)
-{
-    // Create reference
-    RawTensor ref_src(shape, dt, 1, fixed_point_position);
-    RawTensor ref_dst(shape, dt, 1, fixed_point_position);
-
-    // Fill reference
-    if(arm_compute::is_data_type_float(dt))
-    {
-        std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
-        library->fill(ref_src, distribution, 0);
-    }
-    else
-    {
-        int                             one_fixed = 1 << fixed_point_position;
-        std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
-        library->fill(ref_src, distribution, 0);
-    }
-
-    // Compute reference
-    ReferenceCPP::softmax_layer(ref_src, ref_dst);
-
-    return ref_dst;
-}
-
 RawTensor Reference::compute_reference_fixed_point_operation(const TensorShape &shape, DataType dt_in, DataType dt_out, FixedPointOp op, int fixed_point_position)
 {
     // Create reference
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index c540ec4..034a308 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -382,15 +382,6 @@
      * @param[in] pool_info ROI Pooling Layer information.
      */
     static RawTensor compute_reference_roi_pooling_layer(const TensorShape &shape, DataType dt, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info);
-    /** Compute reference softmax layer.
-     *
-     * @param[in] shape                Shape of the input and output tensors.
-     * @param[in] dt                   Data type of input and output tensors.
-     * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers
-     *
-     * @return Computed raw tensor.
-     */
-    static RawTensor compute_reference_softmax_layer(const TensorShape &shape, DataType dt, int fixed_point_position = 0);
     /** Compute reference fixed point operation.
      *
      * @param[in] shape                Shape of the input and output tensors.
diff --git a/tests/validation/ReferenceCPP.cpp b/tests/validation/ReferenceCPP.cpp
index 07801ab..dd243719 100644
--- a/tests/validation/ReferenceCPP.cpp
+++ b/tests/validation/ReferenceCPP.cpp
@@ -361,14 +361,6 @@
     boost::apply_visitor(tensor_visitors::roi_pooling_layer_visitor(s, rois, pool_info), d);
 }
 
-// Softmax Layer
-void ReferenceCPP::softmax_layer(const RawTensor &src, RawTensor &dst)
-{
-    const TensorVariant s = TensorFactory::get_tensor(src);
-    TensorVariant       d = TensorFactory::get_tensor(dst);
-    boost::apply_visitor(tensor_visitors::softmax_layer_visitor(s), d);
-}
-
 // Fixed point operation
 void ReferenceCPP::fixed_point_operation(const RawTensor &src, RawTensor &dst, FixedPointOp op)
 {
diff --git a/tests/validation/ReferenceCPP.h b/tests/validation/ReferenceCPP.h
index 248cdc4..6d4d243 100644
--- a/tests/validation/ReferenceCPP.h
+++ b/tests/validation/ReferenceCPP.h
@@ -319,12 +319,6 @@
      * @param[in]  pool_info ROI Pooling Layer information.
      */
     static void roi_pooling_layer(const RawTensor &src, RawTensor &dst, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info);
-    /** Softmax Layer of @p src.
-     *
-     * @param[in]  src Input tensor.
-     * @param[out] dst Result tensor.
-     */
-    static void softmax_layer(const RawTensor &src, RawTensor &dst);
     /** Fixed point operations of @p src
      *
      * @param[in]  src Input tensor.
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 359dfe8..5018bfd 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -1606,85 +1606,6 @@
     }
 }
 
-// Softmax Layer
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
-{
-    const int cols       = static_cast<int>(in.shape()[0]);
-    const int upper_dims = in.shape().total_size() / cols;
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        // Find max
-        T max = std::numeric_limits<T>::lowest();
-        for(int c = 0; c < cols; ++c)
-        {
-            const T x = in[r * cols + c];
-            if(x > max)
-            {
-                max = x;
-            }
-        }
-
-        // Regularize
-        T sum(0);
-        for(int c = 0; c < cols; ++c)
-        {
-            const T res       = exp(in[r * cols + c] - max);
-            out[r * cols + c] = res;
-            sum += res;
-        }
-
-        // Normalize
-        const T norm_val = static_cast<T>(1) / sum;
-        for(int c = 0; c < cols; ++c)
-        {
-            out[r * cols + c] *= norm_val;
-        }
-    }
-}
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
-{
-    using namespace fixed_point_arithmetic;
-    using promoted_T = typename test::traits::promote<T>::type;
-
-    const int fixed_point_position = in.fixed_point_position();
-    const int cols                 = static_cast<int>(in.shape()[0]);
-    const int upper_dims           = in.shape().total_size() / cols;
-
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        // Find max
-        fixed_point<T> max(std::numeric_limits<T>::lowest(), fixed_point_position, true);
-        for(int c = 0; c < cols; ++c)
-        {
-            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
-            if(x > max)
-            {
-                max = x;
-            }
-        }
-
-        // Regularize
-        fixed_point<promoted_T> sum(0, fixed_point_position);
-        for(int c = 0; c < cols; ++c)
-        {
-            const fixed_point<T> x(in[r * cols + c], fixed_point_position, true);
-            fixed_point<T>       res = exp(x - max);
-            out[r * cols + c]        = res.raw();
-            sum                      = add(sum, static_cast<fixed_point<promoted_T>>(res));
-        }
-
-        // Normalize
-        fixed_point<T> sat_sum(sum);
-        for(int c = 0; c < cols; ++c)
-        {
-            const fixed_point<T> x(out[r * cols + c], fixed_point_position, true);
-            out[r * cols + c] = div(x, sat_sum).raw();
-        }
-    }
-}
-
 // Fixed point operations
 template <typename T>
 void fixed_point_operation(const Tensor<T> &in, Tensor<T> &out, FixedPointOp op)
diff --git a/tests/validation/TensorVisitors.h b/tests/validation/TensorVisitors.h
index 223b76f..bccb70a 100644
--- a/tests/validation/TensorVisitors.h
+++ b/tests/validation/TensorVisitors.h
@@ -409,25 +409,6 @@
     ROIPoolingLayerInfo     _pool_info;
 };
 
-// Softmax Layer visitor
-struct softmax_layer_visitor : public boost::static_visitor<>
-{
-public:
-    explicit softmax_layer_visitor(const TensorVariant &in)
-        : _in(in)
-    {
-    }
-
-    template <typename T>
-    void operator()(Tensor<T> &out) const
-    {
-        const auto &in = boost::get<Tensor<T>>(_in);
-        tensor_operations::softmax_layer(in, out);
-    }
-
-private:
-    const TensorVariant &_in;
-};
 // Fixed Point operations visitor
 struct fixed_point_operation_visitor : public boost::static_visitor<>
 {
diff --git a/tests/validation_new/CL/SoftmaxLayer.cpp b/tests/validation_new/CL/SoftmaxLayer.cpp
new file mode 100644
index 0000000..3edc7b2
--- /dev/null
+++ b/tests/validation_new/CL/SoftmaxLayer.cpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLSoftmaxLayer.h"
+#include "framework/Asserts.h"
+#include "framework/Macros.h"
+#include "framework/datasets/Datasets.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets_new/ShapeDatasets.h"
+#include "tests/validation_new/Validation.h"
+#include "tests/validation_new/fixtures/SoftmaxLayerFixture.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Tolerance for float operations */
+constexpr float tolerance_f16 = 0.002f;
+constexpr float tolerance_f32 = 0.000001f;
+/** Tolerance for fixed point operations */
+constexpr int8_t tolerance_fixed_point = 2;
+
+/** CNN data types */
+const auto CNNDataTypes = framework::dataset::make("DataType",
+{
+    DataType::F16,
+    DataType::F32,
+    DataType::QS8,
+    DataType::QS16,
+});
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(SoftmaxLayer)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), shape, data_type)
+{
+    // Set a fixed point position if the data type is a fixed point type
+    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
+
+    // Create tensors
+    CLTensor src = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position);
+    CLTensor dst = create_tensor<CLTensor>(shape, data_type, 1, fixed_point_position);
+
+    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLSoftmaxLayer smx_layer;
+    smx_layer.configure(&src, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(src.info()->valid_region(), valid_region);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    validate(src.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
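+// The fixture runs the function under test on the target and computes the CPP reference; each FIXTURE_DATA_TEST_CASE below compares the two with validate().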
+template <typename T>
+using CLSoftmaxLayerFixture = SoftmaxValidationFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture<half_float::half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+template <typename T>
+using CLSoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<CLTensor, CLAccessor, CLSoftmaxLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
+FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+                                                                                                                     DataType::QS8)),
+                                                                                                                     framework::dataset::make("FractionalBits", 1, 6)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fixed_point);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+                                                                                                                   DataType::QS8)),
+                                                                                                                   framework::dataset::make("FractionalBits", 1, 6)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fixed_point);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
+FIXTURE_DATA_TEST_CASE(RunSmall, CLSoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+                                                                                                                      DataType::QS16)),
+                                                                                                                      framework::dataset::make("FractionalBits", 1, 14)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fixed_point);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLSoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+                                                                                                                    DataType::QS16)),
+                                                                                                                    framework::dataset::make("FractionalBits", 1, 14)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fixed_point);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/CPP/SoftmaxLayer.cpp b/tests/validation_new/CPP/SoftmaxLayer.cpp
new file mode 100644
index 0000000..8c2cda8
--- /dev/null
+++ b/tests/validation_new/CPP/SoftmaxLayer.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "SoftmaxLayer.h"
+
+#include "tests/validation_new/FixedPoint.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src)
+{
+    // Create reference
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+
+    // Compute reference
+    const int cols       = src.shape()[0];
+    const int upper_dims = src.num_elements() / cols;
+
+    for(int r = 0; r < upper_dims; ++r)
+    {
+        const T *src_row_ptr = src.data() + r * cols;
+        T       *dst_row_ptr = dst.data() + r * cols;
+
+        // Find max
+        const T max = *std::max_element(src_row_ptr, src_row_ptr + cols);
+
+        // Regularize
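+        // Subtracting the row maximum before exponentiation keeps std::exp within a numerically safe range.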
+        T sum(0.f);
+        std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&sum, max](T val)
+        {
+            const T res(std::exp(val - max));
+            sum += res;
+            return res;
+        });
+
+        // Normalize
+        std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [sum](T val)
+        {
+            return val / sum;
+        });
+    }
+
+    return dst;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src)
+{
+    using namespace fixed_point_arithmetic;
+
+    // Create reference
+    SimpleTensor<T> dst{ src.shape(), src.data_type(), 1, src.fixed_point_position() };
+
+    // Compute reference
+    const int cols       = src.shape()[0];
+    const int upper_dims = src.num_elements() / cols;
+
+    for(int r = 0; r < upper_dims; ++r)
+    {
+        const T *src_row_ptr = src.data() + r * cols;
+        T       *dst_row_ptr = dst.data() + r * cols;
+
+        // Find max
+        const fixed_point<T> max(*std::max_element(src_row_ptr, src_row_ptr + cols), src.fixed_point_position(), true);
+
+        // Regularize
+        using promoted_type = fixed_point_arithmetic::traits::promote_t<T>;
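+        // Accumulate the sum in the promoted (wider) fixed-point type to reduce the risk of overflow.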
+        fixed_point<promoted_type> sum(0, src.fixed_point_position(), true);
+        std::transform(src_row_ptr, src_row_ptr + cols, dst_row_ptr, [&](T val)
+        {
+            const fixed_point<T> res = exp(fixed_point<T>(val, src.fixed_point_position(), true) - max);
+            sum                      = add(sum, fixed_point<promoted_type>(res.raw(), src.fixed_point_position(), true));
+            return res.raw();
+        });
+
+        // Normalize
+        fixed_point<T> saturated_sum(sum);
+        std::transform(dst_row_ptr, dst_row_ptr + cols, dst_row_ptr, [&](T val)
+        {
+            return div(fixed_point<T>(val, src.fixed_point_position(), true), saturated_sum).raw();
+        });
+    }
+
+    return dst;
+}
+
+template SimpleTensor<float> softmax_layer(const SimpleTensor<float> &src);
+template SimpleTensor<half_float::half> softmax_layer(const SimpleTensor<half_float::half> &src);
+template SimpleTensor<qint8_t> softmax_layer(const SimpleTensor<qint8_t> &src);
+template SimpleTensor<qint16_t> softmax_layer(const SimpleTensor<qint16_t> &src);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/CPP/SoftmaxLayer.h b/tests/validation_new/CPP/SoftmaxLayer.h
new file mode 100644
index 0000000..28a532e
--- /dev/null
+++ b/tests/validation_new/CPP/SoftmaxLayer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_SOFTMAX_LAYER_H__
+#define __ARM_COMPUTE_TEST_SOFTMAX_LAYER_H__
+
+#include "tests/validation_new/Helpers.h"
+#include "tests/validation_new/SimpleTensor.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src);
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+SimpleTensor<T> softmax_layer(const SimpleTensor<T> &src);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_SOFTMAX_LAYER_H__ */
diff --git a/tests/validation_new/NEON/SoftmaxLayer.cpp b/tests/validation_new/NEON/SoftmaxLayer.cpp
new file mode 100644
index 0000000..ce5b8b8
--- /dev/null
+++ b/tests/validation_new/NEON/SoftmaxLayer.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "framework/Asserts.h"
+#include "framework/Macros.h"
+#include "framework/datasets/Datasets.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets_new/ShapeDatasets.h"
+#include "tests/validation_new/Validation.h"
+#include "tests/validation_new/fixtures/SoftmaxLayerFixture.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Tolerance for float operations */
+constexpr float tolerance_f32 = 0.000001f;
+#ifdef ARM_COMPUTE_ENABLE_FP16
+const float tolerance_f16 = 0.0001f;
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+/** Tolerance for fixed point operations */
+constexpr int8_t tolerance_fixed_point = 2;
+
+/** CNN data types */
+const auto CNNDataTypes = framework::dataset::make("DataType",
+{
+#ifdef ARM_COMPUTE_ENABLE_FP16
+    DataType::F16,
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+    DataType::F32,
+    DataType::QS8,
+    DataType::QS16,
+});
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(SoftmaxLayer)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(concat(datasets::SmallShapes(), datasets::LargeShapes()), CNNDataTypes), shape, data_type)
+{
+    // Set a fixed point position if the data type is a fixed point type
+    const int fixed_point_position = is_data_type_fixed_point(data_type) ? 3 : 0;
+
+    // Create tensors
+    Tensor src = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+    Tensor dst = create_tensor<Tensor>(shape, data_type, 1, fixed_point_position);
+
+    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    NESoftmaxLayer smx_layer;
+    smx_layer.configure(&src, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(src.info()->valid_region(), valid_region);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const int         step    = 16 / data_size_from_type(data_type);
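+    // 16 bytes corresponds to one 128-bit NEON register, so 'step' is the number of elements processed per iteration.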
+    const PaddingSize padding = PaddingCalculator(shape.x(), step).required_padding();
+    validate(src.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+template <typename T>
+using NESoftmaxLayerFixture = SoftmaxValidationFixture<Tensor, Accessor, NESoftmaxLayer, T>;
+
+TEST_SUITE(Float)
+#ifdef ARM_COMPUTE_ENABLE_FP16
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<half_float::half>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END()
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+template <typename T>
+using NESoftmaxLayerFixedPointFixture = SoftmaxValidationFixedPointFixture<Tensor, Accessor, NESoftmaxLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+// Testing for fixed point position [1,6) as reciprocal limits the maximum fixed point position to 5
+FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+                                                                                                                     DataType::QS8)),
+                                                                                                                     framework::dataset::make("FractionalBits", 1, 6)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fixed_point);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+                                                                                                                   DataType::QS8)),
+                                                                                                                   framework::dataset::make("FractionalBits", 1, 6)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fixed_point);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+// Testing for fixed point position [1,14) as reciprocal limits the maximum fixed point position to 14
+FIXTURE_DATA_TEST_CASE(RunSmall, NESoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType",
+                                                                                                                      DataType::QS16)),
+                                                                                                                      framework::dataset::make("FractionalBits", 1, 14)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fixed_point);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NESoftmaxLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType",
+                                                                                                                    DataType::QS16)),
+                                                                                                                    framework::dataset::make("FractionalBits", 1, 14)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_fixed_point);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/fixtures/SoftmaxLayerFixture.h b/tests/validation_new/fixtures/SoftmaxLayerFixture.h
new file mode 100644
index 0000000..c6f3d22
--- /dev/null
+++ b/tests/validation_new/fixtures/SoftmaxLayerFixture.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "framework/Asserts.h"
+#include "framework/Fixture.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation_new/CPP/SoftmaxLayer.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SoftmaxValidationFixedPointFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, int fractional_bits)
+    {
+        _fractional_bits = fractional_bits;
+
+        _target    = compute_target(shape, data_type, fractional_bits);
+        _reference = compute_reference(shape, data_type, fractional_bits);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor)
+    {
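+        // Floating-point tensors (no fractional bits) are filled from a wide real distribution;
+        // fixed-point tensors are filled with raw values spanning roughly [-1, 1] in the Q format.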
+        if(_fractional_bits == 0)
+        {
+            std::uniform_real_distribution<> distribution(-1000.f, 1000.f);
+            library->fill(tensor, distribution, 0);
+        }
+        else
+        {
+            const int                       one_fixed = 1 << _fractional_bits;
+            std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
+            library->fill(tensor, distribution, 0);
+        }
+    }
+
+    TensorType compute_target(const TensorShape &shape, DataType data_type, int fixed_point_position = 0)
+    {
+        // Create tensors
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+
+        // Create and configure function
+        FunctionType smx_layer;
+        smx_layer.configure(&src, &dst);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src));
+
+        // Compute function
+        smx_layer.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, int fixed_point_position = 0)
+    {
+        // Create reference
+        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position };
+
+        // Fill reference
+        fill(src);
+
+        return reference::softmax_layer<T>(src);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+    int             _fractional_bits{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class SoftmaxValidationFixture : public SoftmaxValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type)
+    {
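+        // A fractional-bits value of 0 selects the floating-point fill path in the base fixture.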
+        SoftmaxValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, 0);
+    }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_SOFTMAX_LAYER_FIXTURE */