IVGCVSW-4618 'Transition Units Test Suites'

* Used doctest in android-nn-driver unit tests.

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I9b5d4dfd77d53c7ebee7f8c43628a1d6ff74d1a3
diff --git a/test/1.0/Convolution2D.cpp b/test/1.0/Convolution2D.cpp
index 9a5d239..c833d89 100644
--- a/test/1.0/Convolution2D.cpp
+++ b/test/1.0/Convolution2D.cpp
@@ -7,12 +7,11 @@
 #include "../Convolution2D.hpp"
 #include "../../1.0/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <log/log.h>
 
 #include <OperationsUtils.h>
 
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
 
 using namespace android::hardware;
 using namespace driverTestHelpers;
@@ -29,14 +28,18 @@
 
 } // namespace driverTestHelpers
 
-BOOST_AUTO_TEST_CASE(ConvValidPadding_Hal_1_0)
+TEST_SUITE("Convolution2DTests_1.0")
+{
+
+
+TEST_CASE("ConvValidPadding_Hal_1_0")
 {
     PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingValid);
 }
 
-BOOST_AUTO_TEST_CASE(ConvSamePadding_Hal_1_0)
+TEST_CASE("ConvSamePadding_Hal_1_0")
 {
     PaddingTestImpl<hal_1_0::HalPolicy>(android::nn::kPaddingSame);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/1.0/FullyConnectedReshape.cpp b/test/1.0/FullyConnectedReshape.cpp
index 72c90ca..4585c95 100644
--- a/test/1.0/FullyConnectedReshape.cpp
+++ b/test/1.0/FullyConnectedReshape.cpp
@@ -6,37 +6,31 @@
 #include "../DriverTestHelpers.hpp"
 #include "../../1.0/FullyConnected.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(FullyConnectedReshapeTests)
-
-BOOST_AUTO_TEST_CASE(TestFlattenFullyConnectedInput)
+TEST_SUITE("FullyConnectedReshapeTests")
+{
+TEST_CASE("TestFlattenFullyConnectedInput")
 {
     using armnn::TensorShape;
 
     // Pass through 2d input
-    BOOST_TEST(FlattenFullyConnectedInput(TensorShape({2,2048}), TensorShape({512, 2048})) ==
-               TensorShape({2, 2048}));
+    CHECK(FlattenFullyConnectedInput(TensorShape({2,2048}), TensorShape({512, 2048})) == TensorShape({2, 2048}));
 
     // Trivial flattening of batched channels
-    BOOST_TEST(FlattenFullyConnectedInput(TensorShape({97,1,1,2048}), TensorShape({512, 2048})) ==
-               TensorShape({97, 2048}));
+    CHECK(FlattenFullyConnectedInput(TensorShape({97,1,1,2048}), TensorShape({512, 2048})) == TensorShape({97, 2048}));
 
     // Flatten single batch of rows
-    BOOST_TEST(FlattenFullyConnectedInput(TensorShape({1,97,1,2048}), TensorShape({512, 2048})) ==
-               TensorShape({97, 2048}));
+    CHECK(FlattenFullyConnectedInput(TensorShape({1,97,1,2048}), TensorShape({512, 2048})) == TensorShape({97, 2048}));
 
     // Flatten single batch of columns
-    BOOST_TEST(FlattenFullyConnectedInput(TensorShape({1,1,97,2048}), TensorShape({512, 2048})) ==
-               TensorShape({97, 2048}));
+    CHECK(FlattenFullyConnectedInput(TensorShape({1,1,97,2048}), TensorShape({512, 2048})) == TensorShape({97, 2048}));
 
     // Move batches into input dimension
-    BOOST_TEST(FlattenFullyConnectedInput(TensorShape({50,1,1,10}), TensorShape({512, 20})) ==
-               TensorShape({25, 20}));
+    CHECK(FlattenFullyConnectedInput(TensorShape({50,1,1,10}), TensorShape({512, 20})) == TensorShape({25, 20}));
 
     // Flatten single batch of 3D data (e.g. convolution output)
-    BOOST_TEST(FlattenFullyConnectedInput(TensorShape({1,16,16,10}), TensorShape({512, 2560})) ==
-               TensorShape({1, 2560}));
+    CHECK(FlattenFullyConnectedInput(TensorShape({1,16,16,10}), TensorShape({512, 2560})) == TensorShape({1, 2560}));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/1.0/Lstm.cpp b/test/1.0/Lstm.cpp
index 5f0a209..0833fd6 100644
--- a/test/1.0/Lstm.cpp
+++ b/test/1.0/Lstm.cpp
@@ -5,30 +5,46 @@
 
 #include "../Lstm.hpp"
 
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(LstmTests)
-
 using namespace armnn_driver;
 
-BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
+TEST_SUITE("LstmTests_1.0_CpuRef")
 {
-    LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(sample);
+    TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_CpuRef")
+    {
+        LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_CpuRef")
+    {
+        LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_CpuRef")
+    {
+        LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_CpuRef")
+    {
+        LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(armnn::Compute::CpuRef);
+    }
 }
 
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
+#if defined(ARMCOMPUTECL_ENABLED)
+TEST_SUITE("LstmTests_1.0_GpuAcc")
 {
-    LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(sample);
+    TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.0_GpuAcc")
+    {
+        LstmNoCifgNoPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.0_GpuAcc")
+    {
+        LstmCifgPeepholeNoProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.0_GpuAcc")
+    {
+        LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.0_GpuAcc")
+    {
+        LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(armnn::Compute::GpuAcc);
+    }
 }
-
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
-{
-    LstmNoCifgPeepholeProjection<hal_1_0::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
-{
-    LstmCifgPeepholeNoProjectionBatch2<hal_1_0::HalPolicy>(sample);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
+#endif
\ No newline at end of file
diff --git a/test/1.1/Convolution2D.cpp b/test/1.1/Convolution2D.cpp
index 32d5018..0daa472 100644
--- a/test/1.1/Convolution2D.cpp
+++ b/test/1.1/Convolution2D.cpp
@@ -7,12 +7,12 @@
 #include "../Convolution2D.hpp"
 #include "../../1.1/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <log/log.h>
 
 #include <OperationsUtils.h>
 
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
+
 
 using namespace android::hardware;
 using namespace driverTestHelpers;
@@ -29,24 +29,28 @@
 
 } // namespace driverTestHelpers
 
-BOOST_AUTO_TEST_CASE(ConvValidPadding_Hal_1_1)
+
+TEST_SUITE("Convolution2DTests_1.1")
+{
+
+TEST_CASE("ConvValidPadding_Hal_1_1")
 {
     PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid);
 }
 
-BOOST_AUTO_TEST_CASE(ConvSamePadding_Hal_1_1)
+TEST_CASE("ConvSamePadding_Hal_1_1")
 {
     PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame);
 }
 
-BOOST_AUTO_TEST_CASE(ConvValidPaddingFp16Flag_Hal_1_1)
+TEST_CASE("ConvValidPaddingFp16Flag_Hal_1_1")
 {
     PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingValid, true);
 }
 
-BOOST_AUTO_TEST_CASE(ConvSamePaddingFp16Flag_Hal_1_1)
+TEST_CASE("ConvSamePaddingFp16Flag_Hal_1_1")
 {
     PaddingTestImpl<hal_1_1::HalPolicy>(android::nn::kPaddingSame, true);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/1.1/Lstm.cpp b/test/1.1/Lstm.cpp
index 703597e..2699ec4 100644
--- a/test/1.1/Lstm.cpp
+++ b/test/1.1/Lstm.cpp
@@ -5,30 +5,46 @@
 
 #include "../Lstm.hpp"
 
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(LstmTests)
-
 using namespace armnn_driver;
 
-BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
+TEST_SUITE("LstmTests_1.1_CpuRef")
 {
-    LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(sample);
+    TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_CpuRef")
+    {
+        LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_CpuRef")
+    {
+        LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_CpuRef")
+    {
+        LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_CpuRef")
+    {
+        LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(armnn::Compute::CpuRef);
+    }
 }
 
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
+#if defined(ARMCOMPUTECL_ENABLED)
+TEST_SUITE("LstmTests_1.1_GpuAcc")
 {
-    LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(sample);
+    TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.1_GpuAcc")
+    {
+        LstmNoCifgNoPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.1_GpuAcc")
+    {
+        LstmCifgPeepholeNoProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.1_GpuAcc")
+    {
+        LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.1_GpuAcc")
+    {
+        LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(armnn::Compute::GpuAcc);
+    }
 }
-
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
-{
-    LstmNoCifgPeepholeProjection<hal_1_1::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
-{
-    LstmCifgPeepholeNoProjectionBatch2<hal_1_1::HalPolicy>(sample);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
+#endif
diff --git a/test/1.1/Mean.cpp b/test/1.1/Mean.cpp
index c9a5a6d..c7c5a9b 100644
--- a/test/1.1/Mean.cpp
+++ b/test/1.1/Mean.cpp
@@ -8,12 +8,10 @@
 
 #include "../1.1/HalPolicy.hpp"
 
-#include <boost/test/data/test_case.hpp>
+#include <doctest/doctest.h>
 
 #include <array>
 
-BOOST_AUTO_TEST_SUITE(MeanTests)
-
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
@@ -24,12 +22,6 @@
 namespace
 {
 
-#ifndef ARMCOMPUTECL_ENABLED
-    static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-    static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
 void MeanTestImpl(const TestTensor& input,
                   const hidl_vec<uint32_t>& axisDimensions,
                   const int32_t* axisValues,
@@ -94,64 +86,175 @@
     if (preparedModel.get() != nullptr)
     {
         V1_0::ErrorStatus execStatus = Execute(preparedModel, request);
-        BOOST_TEST(execStatus == V1_0::ErrorStatus::NONE);
+        CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
     }
 
     const float* expectedOutputData = expectedOutput.GetData();
     for (unsigned int i = 0; i < expectedOutput.GetNumElements(); i++)
     {
-        BOOST_TEST(outputData[i] == expectedOutputData[i]);
+        CHECK(outputData[i] == expectedOutputData[i]);
     }
 }
 
 } // anonymous namespace
 
-BOOST_DATA_TEST_CASE(MeanNoKeepDimsTest, COMPUTE_DEVICES)
+TEST_SUITE("MeanTests_CpuRef")
 {
-    TestTensor input{ armnn::TensorShape{ 4, 3, 2 }, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
-                                                       11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
-                                                       20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
-    hidl_vec<uint32_t> axisDimensions = { 2 };
-    int32_t axisValues[] = { 0, 1 };
-    int32_t keepDims = 0;
-    TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+    TEST_CASE("MeanNoKeepDimsTest_CpuRef")
+    {
+        TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                            11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+                            20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
 
-    MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, sample);
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuRef);
+    }
+
+    TEST_CASE("MeanKeepDimsTest_CpuRef")
+    {
+        TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuRef);
+    }
+
+    TEST_CASE("MeanFp16NoKeepDimsTest_CpuRef")
+    {
+        TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                            11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+                            20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+    }
+
+    TEST_CASE("MeanFp16KeepDimsTest_CpuRef")
+    {
+        TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuRef);
+    }
 }
 
-BOOST_DATA_TEST_CASE(MeanKeepDimsTest, COMPUTE_DEVICES)
+#ifdef ARMCOMPUTECL_ENABLED
+TEST_SUITE("MeanTests_CpuAcc")
 {
-    TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
-    hidl_vec<uint32_t> axisDimensions = { 1 };
-    int32_t axisValues[] = { 2 };
-    int32_t keepDims = 1;
-    TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+    TEST_CASE("MeanNoKeepDimsTest_CpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                            11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+                            20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
 
-    MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, sample);
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuAcc);
+    }
+
+    TEST_CASE("MeanKeepDimsTest_CpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::CpuAcc);
+    }
+
+    TEST_CASE("MeanFp16NoKeepDimsTest_CpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                            11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+                            20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+    }
+
+    TEST_CASE("MeanFp16KeepDimsTest_CpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::CpuAcc);
+    }
 }
 
-BOOST_DATA_TEST_CASE(MeanFp16NoKeepDimsTest, COMPUTE_DEVICES)
+TEST_SUITE("MeanTests_GpuAcc")
 {
-    TestTensor input{ armnn::TensorShape{ 4, 3, 2 }, { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
-                                                       11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
-                                                       20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
-    hidl_vec<uint32_t> axisDimensions = { 2 };
-    int32_t axisValues[] = { 0, 1 };
-    int32_t keepDims = 0;
-    TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+    TEST_CASE("MeanNoKeepDimsTest_GpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                            11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+                            20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
 
-    MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, sample);
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::GpuAcc);
+    }
+
+    TEST_CASE("MeanKeepDimsTest_GpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, false, armnn::Compute::GpuAcc);
+    }
+
+    TEST_CASE("MeanFp16NoKeepDimsTest_GpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 4, 3, 2 },
+                          { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                            11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f,
+                            20.0f, 21.0f, 22.0f, 23.0f, 24.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 2 };
+        int32_t axisValues[] = { 0, 1 };
+        int32_t keepDims = 0;
+        TestTensor expectedOutput{ armnn::TensorShape{ 2 }, { 12.0f, 13.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+    }
+
+    TEST_CASE("MeanFp16KeepDimsTest_GpuAcc")
+    {
+        TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
+        hidl_vec<uint32_t> axisDimensions = { 1 };
+        int32_t axisValues[] = { 2 };
+        int32_t keepDims = 1;
+        TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
+
+        MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, armnn::Compute::GpuAcc);
+    }
 }
-
-BOOST_DATA_TEST_CASE(MeanFp16KeepDimsTest, COMPUTE_DEVICES)
-{
-    TestTensor input{ armnn::TensorShape{ 1, 1, 3, 2 }, { 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f } };
-    hidl_vec<uint32_t> axisDimensions = { 1 };
-    int32_t axisValues[] = { 2 };
-    int32_t keepDims = 1;
-    TestTensor expectedOutput{ armnn::TensorShape{ 1, 1, 1, 2 }, {  2.0f, 2.0f } };
-
-    MeanTestImpl(input, axisDimensions, axisValues, keepDims, expectedOutput, true, sample);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
+#endif
diff --git a/test/1.1/Transpose.cpp b/test/1.1/Transpose.cpp
index 206f9b9..4c4dc34 100644
--- a/test/1.1/Transpose.cpp
+++ b/test/1.1/Transpose.cpp
@@ -9,16 +9,13 @@
 
 #include "../1.1/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
+#include <doctest/doctest.h>
 
 #include <log/log.h>
 
 #include <array>
 #include <cmath>
 
-BOOST_AUTO_TEST_SUITE(TransposeTests)
-
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
@@ -29,12 +26,6 @@
 namespace
 {
 
-#ifndef ARMCOMPUTECL_ENABLED
-    static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-    static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
 void TransposeTestImpl(const TestTensor & inputs, int32_t perm[],
                        const TestTensor & expectedOutputTensor, armnn::Compute computeDevice)
 {
@@ -98,38 +89,97 @@
     const float * expectedOutput = expectedOutputTensor.GetData();
     for (unsigned int i = 0; i < expectedOutputTensor.GetNumElements(); ++i)
     {
-        BOOST_TEST(outdata[i] == expectedOutput[i]);
+        CHECK(outdata[i] == expectedOutput[i]);
     }
 }
 
 } // namespace
 
-BOOST_DATA_TEST_CASE(Transpose , COMPUTE_DEVICES)
+TEST_SUITE("TransposeTests_CpuRef")
 {
-    int32_t perm[] = {2, 3, 1, 0};
-    TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
-    TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
+    TEST_CASE("Transpose_CpuRef")
+    {
+        int32_t perm[] = {2, 3, 1, 0};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+        TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
 
-    TransposeTestImpl(input, perm, expected, sample);
+        TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
+    }
+
+    TEST_CASE("TransposeNHWCToArmNN_CpuRef")
+    {
+        int32_t perm[] = {0, 3, 1, 2};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
+        TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+
+        TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
+    }
+    TEST_CASE("TransposeArmNNToNHWC_CpuRef")
+    {
+        int32_t perm[] = {0, 2, 3, 1};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+        TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+        TransposeTestImpl(input, perm, expected, armnn::Compute::CpuRef);
+    }
 }
 
-BOOST_DATA_TEST_CASE(TransposeNHWCToArmNN , COMPUTE_DEVICES)
+#ifdef ARMCOMPUTECL_ENABLED
+TEST_SUITE("TransposeTests_CpuAcc")
 {
-    int32_t perm[] = {0, 3, 1, 2};
-    TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
-    TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+    TEST_CASE("Transpose_CpuAcc")
+    {
+        int32_t perm[] = {2, 3, 1, 0};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+        TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
 
-    TransposeTestImpl(input, perm, expected, sample);
+        TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
+    }
+
+    TEST_CASE("TransposeNHWCToArmNN_CpuAcc")
+    {
+        int32_t perm[] = {0, 3, 1, 2};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
+        TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+
+        TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
+    }
+    TEST_CASE("TransposeArmNNToNHWC_CpuAcc")
+    {
+        int32_t perm[] = {0, 2, 3, 1};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+        TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+        TransposeTestImpl(input, perm, expected, armnn::Compute::CpuAcc);
+    }
 }
-
-BOOST_DATA_TEST_CASE(TransposeArmNNToNHWC , COMPUTE_DEVICES)
+TEST_SUITE("TransposeTests_GpuAcc")
 {
-    int32_t perm[] = {0, 2, 3, 1};
-    TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
-    TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+    TEST_CASE("Transpose_GpuAcc")
+    {
+        int32_t perm[] = {2, 3, 1, 0};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+        TestTensor expected{armnn::TensorShape{2, 2, 2, 1},{1, 5, 2, 6, 3, 7, 4, 8}};
 
-    TransposeTestImpl(input, perm, expected, sample);
+        TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
+    }
+
+    TEST_CASE("TransposeNHWCToArmNN_GpuAcc")
+    {
+        int32_t perm[] = {0, 3, 1, 2};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 3},{1, 2, 3, 11, 12, 13, 21, 22, 23, 31, 32, 33}};
+        TestTensor expected{armnn::TensorShape{1, 3, 2, 2},{1, 11, 21, 31, 2, 12, 22, 32, 3, 13, 23, 33}};
+
+        TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("TransposeArmNNToNHWC_GpuAcc")
+    {
+        int32_t perm[] = {0, 2, 3, 1};
+        TestTensor input{armnn::TensorShape{1, 2, 2, 2},{1, 2, 3, 4, 5, 6, 7, 8}};
+        TestTensor expected{armnn::TensorShape{1, 2, 2, 2},{1, 5, 2, 6, 3, 7, 4, 8}};
+
+        TransposeTestImpl(input, perm, expected, armnn::Compute::GpuAcc);
+    }
 }
-
-BOOST_AUTO_TEST_SUITE_END()
+#endif
 
diff --git a/test/1.2/Capabilities.cpp b/test/1.2/Capabilities.cpp
index 15ecf96..aa0c642 100644
--- a/test/1.2/Capabilities.cpp
+++ b/test/1.2/Capabilities.cpp
@@ -9,7 +9,31 @@
 
 #include <armnn/utility/Assert.hpp>
 
-#include <boost/test/unit_test.hpp>
+// Un-define some of the macros as they clash in 'third-party/doctest/doctest.h'
+// and 'system/core/base/include/android-base/logging.h'
+// macro redefined error[-Werror,-Wmacro-redefined]
+#ifdef CHECK
+#undef CHECK
+#endif
+#ifdef CHECK_EQ
+#undef CHECK_EQ
+#endif
+#ifdef CHECK_NE
+#undef CHECK_NE
+#endif
+#ifdef CHECK_GT
+#undef CHECK_GT
+#endif
+#ifdef CHECK_LT
+#undef CHECK_LT
+#endif
+#ifdef CHECK_GE
+#undef CHECK_GE
+#endif
+#ifdef CHECK_LE
+#undef CHECK_LE
+#endif
+#include <doctest/doctest.h>
 
 #include <sys/system_properties.h>
 
@@ -66,9 +90,9 @@
     ARMNN_ASSERT(perfInfo.powerUsage == powerUsage);
 }
 
-BOOST_FIXTURE_TEST_SUITE(CapabilitiesTests, CapabilitiesFixture)
-
-BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesWithRuntime)
+TEST_SUITE("CapabilitiesTests")
+{
+TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesWithRuntime")
 {
     using namespace armnn_driver::hal_1_2;
     using namespace android::nn;
@@ -124,7 +148,7 @@
     ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
 }
 
-BOOST_AUTO_TEST_CASE(PerformanceCapabilitiesUndefined)
+TEST_CASE_FIXTURE(CapabilitiesFixture, "PerformanceCapabilitiesUndefined")
 {
     using namespace armnn_driver::hal_1_2;
     using namespace android::nn;
@@ -164,4 +188,4 @@
     ArmnnDriverImpl::getCapabilities_1_2(runtime, getCapabilitiesFn);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/test/1.2/Dilation.cpp b/test/1.2/Dilation.cpp
index 1a7ba4b..e1cde9f 100644
--- a/test/1.2/Dilation.cpp
+++ b/test/1.2/Dilation.cpp
@@ -7,11 +7,11 @@
 
 #include "../../1.2/HalPolicy.hpp"
 
-#include <boost/test/data/test_case.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(DilationTests)
-
-BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingNoDilation)
+TEST_SUITE("DilationTests")
+{
+TEST_CASE("ConvolutionExplicitPaddingNoDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = false;
@@ -21,7 +21,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(ConvolutionExplicitPaddingDilation)
+TEST_CASE("ConvolutionExplicitPaddingDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = false;
@@ -31,7 +31,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingNoDilation)
+TEST_CASE("ConvolutionImplicitPaddingNoDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = false;
@@ -41,7 +41,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(ConvolutionImplicitPaddingDilation)
+TEST_CASE("ConvolutionImplicitPaddingDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = false;
@@ -51,7 +51,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingNoDilation)
+TEST_CASE("DepthwiseConvolutionExplicitPaddingNoDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = true;
@@ -61,7 +61,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionExplicitPaddingDilation)
+TEST_CASE("DepthwiseConvolutionExplicitPaddingDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = true;
@@ -71,7 +71,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingNoDilation)
+TEST_CASE("DepthwiseConvolutionImplicitPaddingNoDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = true;
@@ -81,7 +81,7 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionImplicitPaddingDilation)
+TEST_CASE("DepthwiseConvolutionImplicitPaddingDilation")
 {
     DilationTestOptions options;
     options.m_IsDepthwiseConvolution = true;
@@ -91,4 +91,4 @@
     DilationTestImpl<hal_1_2::HalPolicy>(options);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/test/1.2/Lstm.cpp b/test/1.2/Lstm.cpp
index 03f7fe4..70fbf70 100644
--- a/test/1.2/Lstm.cpp
+++ b/test/1.2/Lstm.cpp
@@ -5,47 +5,54 @@
 
 #include "../Lstm.hpp"
 
-#include <boost/test/data/test_case.hpp>
-
-BOOST_AUTO_TEST_SUITE(LstmTests)
-
 using namespace armnn_driver;
 
-BOOST_DATA_TEST_CASE(LstmNoCifgNoPeepholeNoProjectionTest, COMPUTE_DEVICES)
+TEST_SUITE("LstmTests_1.2_CpuRef")
 {
-    LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionTest, COMPUTE_DEVICES)
-{
-    LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionTest, COMPUTE_DEVICES)
-{
-    LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeNoProjectionBatch2Test, COMPUTE_DEVICES)
-{
-    LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmNoCifgPeepholeProjectionNoClippingLayerNormTest, COMPUTE_DEVICES)
-{
-    LstmNoCifgPeepholeProjectionNoClippingLayerNorm<hal_1_2::HalPolicy>(sample);
-}
-
-BOOST_DATA_TEST_CASE(LstmCifgPeepholeProjectionNoClippingLayerNormTest, COMPUTE_DEVICES)
-{
-    LstmCifgPeepholeProjectionNoClippingLayerNorm<hal_1_2::HalPolicy>(sample);
+    TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_CpuRef")
+    {
+        LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_CpuRef")
+    {
+        LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_CpuRef")
+    {
+        LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_CpuRef")
+    {
+        LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+    }
+    TEST_CASE("QuantizedLstmTest_1.2_CpuRef")
+    {
+        QuantizedLstm<hal_1_2::HalPolicy>(armnn::Compute::CpuRef);
+    }
 }
 
 #if defined(ARMCOMPUTECL_ENABLED)
-BOOST_DATA_TEST_CASE(QuantizedLstmTest, COMPUTE_DEVICES)
+TEST_SUITE("LstmTests_1.2_GpuAcc")
 {
-    QuantizedLstm<hal_1_2::HalPolicy>(sample);
+    TEST_CASE("LstmNoCifgNoPeepholeNoProjectionTest_1.2_GpuAcc")
+    {
+        LstmNoCifgNoPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionTest_1.2_GpuAcc")
+    {
+        LstmCifgPeepholeNoProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmNoCifgPeepholeProjectionTest_1.2_GpuAcc")
+    {
+        LstmNoCifgPeepholeProjection<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("LstmCifgPeepholeNoProjectionBatch2Test_1.2_GpuAcc")
+    {
+        LstmCifgPeepholeNoProjectionBatch2<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+    }
+    TEST_CASE("QuantizedLstmTest_1.2_GpuAcc")
+    {
+        QuantizedLstm<hal_1_2::HalPolicy>(armnn::Compute::GpuAcc);
+    }
 }
 #endif
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/1.3/QLstm.cpp b/test/1.3/QLstm.cpp
index 27e52a6..82acba6 100644
--- a/test/1.3/QLstm.cpp
+++ b/test/1.3/QLstm.cpp
@@ -10,14 +10,10 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
-#include <boost/math/special_functions/relative_difference.hpp>
+#include <doctest/doctest.h>
 
 #include <array>
 
-BOOST_AUTO_TEST_SUITE(QLSTMTests)
-
 using ArmnnDriver   = armnn_driver::ArmnnDriver;
 using DriverOptions = armnn_driver::DriverOptions;
 
@@ -26,6 +22,8 @@
 
 using HalPolicy = hal_1_3::HalPolicy;
 
+static const float TOLERANCE = 1.0f;
+
 namespace
 {
 
@@ -42,26 +40,6 @@
     return inputRequestArgument;
 }
 
-// Returns true if the relative difference between two float values is less than the tolerance value given.
-// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
-bool TolerantCompareEqual(float a, float b, float tolerance = 1.0f)
-{
-    float rd;
-    if (a == 0.0f)
-    {
-        rd = fabs(b);
-    }
-    else if (b == 0.0f)
-    {
-        rd = fabs(a);
-    }
-    else
-    {
-        rd = boost::math::relative_difference(a, b);
-    }
-    return rd < tolerance;
-}
-
 // Helper function to create an OperandLifeTime::NO_VALUE for testing.
 // To be used on optional input operands that have no values - these are valid and should be tested.
 HalPolicy::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
@@ -85,12 +63,6 @@
     }
 }
 
-#ifndef ARMCOMPUTECL_ENABLED
-static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
-#endif
-
 // Add our own tests here since we skip the qlstm tests which Google supplies (because of non-const weights)
 void QLstmTestImpl(const hidl_vec<uint32_t>&   inputDimensions,
                    const std::vector<int8_t>&   inputValue,
@@ -527,8 +499,8 @@
     // check the results
     for (size_t i = 0; i < outputStateOutValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
-                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+        CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ).epsilon(TOLERANCE),
+                      "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
     }
 
     // CELL STATE OUTPUT Does not match currently: IVGCVSW-4860 Verify remaining VTS tests (2) for QLSTM
@@ -541,8 +513,8 @@
 
     for (size_t i = 0; i < outputValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
-                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+        CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ).epsilon(TOLERANCE),
+                      "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
     }
 }
 
@@ -1028,19 +1000,34 @@
 } // anonymous namespace
 
 // Support is not added yet
-//BOOST_DATA_TEST_CASE(QLSTMWithProjectionTest, COMPUTE_DEVICES)
+//TEST_CASE("QLSTMWithProjectionTest")
 //{
 //     QLstmWithProjection(sample);
 //}
 
-BOOST_DATA_TEST_CASE(QLSTMWithNoProjectionTest, COMPUTE_DEVICES)
+TEST_SUITE("QLSTMTests_CpuRef")
 {
-    QLstmWithNoProjection(sample);
-}
+    TEST_CASE("QLSTMWithNoProjectionTest_CpuRef")
+    {
+        QLstmWithNoProjection(armnn::Compute::CpuRef);
+    }
 
-BOOST_DATA_TEST_CASE(DynamicOutputQLSTMWithNoProjectionTest, COMPUTE_DEVICES)
+    TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuRef")
+    {
+        DynamicOutputQLstmWithNoProjection(armnn::Compute::CpuRef);
+    }
+}
+#ifdef ARMCOMPUTECL_ENABLED
+TEST_SUITE("QLSTMTests_CpuAcc")
 {
-    DynamicOutputQLstmWithNoProjection(sample);
-}
+    TEST_CASE("QLSTMWithNoProjectionTest_CpuAcc")
+    {
+        QLstmWithNoProjection(armnn::Compute::CpuAcc);
+    }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+    TEST_CASE("DynamicOutputQLstmWithNoProjection_CpuAcc")
+    {
+        DynamicOutputQLstmWithNoProjection(armnn::Compute::CpuAcc);
+    }
+}
+#endif
diff --git a/test/1.3/QosTests.cpp b/test/1.3/QosTests.cpp
index 9fd6688..3b06405 100644
--- a/test/1.3/QosTests.cpp
+++ b/test/1.3/QosTests.cpp
@@ -10,12 +10,10 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
+#include <doctest/doctest.h>
 
-
-BOOST_AUTO_TEST_SUITE(QosTests)
-
+TEST_SUITE("QosTests")
+{
 using ArmnnDriver   = armnn_driver::ArmnnDriver;
 using DriverOptions = armnn_driver::DriverOptions;
 
@@ -40,13 +38,7 @@
     }
 }
 
-#ifndef ARMCOMPUTECL_ENABLED
-static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::CpuAcc }};
-#endif
-
-BOOST_AUTO_TEST_CASE(ConcurrentExecuteWithQosPriority)
+TEST_CASE("ConcurrentExecuteWithQosPriority")
 {
     ALOGI("ConcurrentExecuteWithQOSPriority: entry");
 
@@ -102,7 +94,7 @@
         preparedModelsSize++;
     }
 
-    BOOST_TEST(maxRequests == preparedModelsSize);
+    CHECK(maxRequests == preparedModelsSize);
 
     // construct the request data
     V1_0::DataLocation inloc = {};
@@ -172,15 +164,15 @@
     {
         if (i < 15)
         {
-            BOOST_TEST(outdata[i][0] == 152);
+            CHECK(outdata[i][0] == 152);
         }
         else if (i < 30)
         {
-            BOOST_TEST(outdata[i][0] == 141);
+            CHECK(outdata[i][0] == 141);
         }
         else
         {
-            BOOST_TEST(outdata[i][0] == 159);
+            CHECK(outdata[i][0] == 159);
         }
 
     }
@@ -189,4 +181,4 @@
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/test/Concat.cpp b/test/Concat.cpp
index 54ee8a2..d39375a 100644
--- a/test/Concat.cpp
+++ b/test/Concat.cpp
@@ -7,15 +7,11 @@
 
 #include "../1.0/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
-#include <boost/test/data/test_case.hpp>
+#include <doctest/doctest.h>
 
 #include <array>
 #include <log/log.h>
 
-
-BOOST_AUTO_TEST_SUITE(ConcatTests)
-
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
@@ -26,12 +22,6 @@
 namespace
 {
 
-#ifndef ARMCOMPUTECL_ENABLED
-    static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-    static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
 void
 ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
                 int32_t concatAxis,
@@ -61,19 +51,19 @@
     model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};
 
     // make the prepared model
-    V1_0::ErrorStatus prepareStatus=V1_0::ErrorStatus::NONE;
+    V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
     android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
                                                                              *driver,
                                                                              prepareStatus,
                                                                              expectedPrepareStatus);
-    BOOST_TEST(prepareStatus == expectedPrepareStatus);
+    CHECK((int)prepareStatus == (int)expectedPrepareStatus);
     if (prepareStatus != V1_0::ErrorStatus::NONE)
     {
         // prepare failed, we cannot continue
         return;
     }
 
-    BOOST_TEST(preparedModel.get() != nullptr);
+    CHECK(preparedModel.get() != nullptr);
     if (preparedModel.get() == nullptr)
     {
         // don't spoil other tests if prepare failed
@@ -132,7 +122,7 @@
     // run the execution
     ARMNN_ASSERT(preparedModel.get() != nullptr);
     auto execStatus = Execute(preparedModel, request, expectedExecStatus);
-    BOOST_TEST(execStatus == expectedExecStatus);
+    CHECK((int)execStatus == (int)expectedExecStatus);
 
     if (execStatus == V1_0::ErrorStatus::NONE)
     {
@@ -140,359 +130,607 @@
         const float * expectedOutput = expectedOutputTensor.GetData();
         for (unsigned int i=0; i<expectedOutputTensor.GetNumElements();++i)
         {
-            BOOST_TEST(outdata[i] == expectedOutput[i]);
+            CHECK(outdata[i] == expectedOutput[i]);
         }
     }
 }
 
-} // namespace <anonymous>
-
-
-BOOST_DATA_TEST_CASE(SimpleConcatAxis0, COMPUTE_DEVICES)
+/// Test cases...
+void SimpleConcatAxis0(armnn::Compute computeDevice)
 {
     int32_t axis = 0;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{3,1,1,1},{0,1,2}};
-
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    TestTensor expected{armnn::TensorShape{3, 1, 1, 1}, {0, 1, 2}};
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(ConcatAxis0_NoInterleave, COMPUTE_DEVICES)
+void ConcatAxis0NoInterleave(armnn::Compute computeDevice)
 {
     int32_t axis = 0;
-    TestTensor aIn{armnn::TensorShape{2,1,2,1},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{3,1,2,1},{4,  5,
-                                                6,  7,
-                                                8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
+    TestTensor aIn{armnn::TensorShape{2, 1, 2, 1}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{3, 1, 2, 1}, {4, 5,
+                                                    6, 7,
+                                                    8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10, 11}};
 
-    TestTensor expected{armnn::TensorShape{6,1,2,1},{0,  1,
-                                                     2,  3,
-                                                     4,  5,
-                                                     6,  7,
-                                                     8,  9,
-                                                     10, 11}};
+    TestTensor expected{armnn::TensorShape{6, 1, 2, 1}, {0, 1,
+                                                         2, 3,
+                                                         4, 5,
+                                                         6, 7,
+                                                         8, 9,
+                                                         10, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxis1, COMPUTE_DEVICES)
+void SimpleConcatAxis1(armnn::Compute computeDevice)
 {
     int32_t axis = 1;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{1,3,1,1},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{1, 3, 1, 1}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(ConcatAxis1_NoInterleave, COMPUTE_DEVICES)
+void ConcatAxis1NoInterleave(armnn::Compute computeDevice)
 {
     int32_t axis = 1;
-    TestTensor aIn{armnn::TensorShape{1,2,2,1},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,3,2,1},{4,  5,
-                                                6,  7,
-                                                8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,2,1},{10, 11}};
+    TestTensor aIn{armnn::TensorShape{1, 2, 2, 1}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 3, 2, 1}, {4, 5,
+                                                    6, 7,
+                                                    8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10, 11}};
 
-    TestTensor expected{armnn::TensorShape{1,6,2,1},{0,  1,
-                                                     2,  3,
-                                                     4,  5,
-                                                     6,  7,
-                                                     8,  9,
-                                                     10, 11}};
+    TestTensor expected{armnn::TensorShape{1, 6, 2, 1}, {0, 1,
+                                                         2, 3,
+                                                         4, 5,
+                                                         6, 7,
+                                                         8, 9,
+                                                         10, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxis1_DoInterleave, COMPUTE_DEVICES)
+void SimpleConcatAxis1DoInterleave(armnn::Compute computeDevice)
 {
     int32_t axis = 1;
-    TestTensor aIn{armnn::TensorShape{2,2,1,1},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{2,3,1,1},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor cIn{armnn::TensorShape{2,1,1,1},{10,
-                                                11}};
+    TestTensor aIn{armnn::TensorShape{2, 2, 1, 1}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{2, 3, 1, 1}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor cIn{armnn::TensorShape{2, 1, 1, 1}, {10,
+                                                    11}};
 
-    TestTensor expected{armnn::TensorShape{2,6,1,1},{0, 1, 4, 5, 6, 10,
-                                                     2, 3, 7, 8, 9, 11}};
+    TestTensor expected{armnn::TensorShape{2, 6, 1, 1}, {0, 1, 4, 5, 6, 10,
+                                                         2, 3, 7, 8, 9, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxis2, COMPUTE_DEVICES)
+void SimpleConcatAxis2(armnn::Compute computeDevice)
 {
     int32_t axis = 2;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{1,1,3,1},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{1, 1, 3, 1}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(ConcatAxis2_NoInterleave, COMPUTE_DEVICES)
+void ConcatAxis2NoInterleave(armnn::Compute computeDevice)
 {
     int32_t axis = 2;
-    TestTensor aIn{armnn::TensorShape{1,1,2,2},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,1,3,2},{4,  5,
-                                                6,  7,
-                                                8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,1,2},{10, 11}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 3, 2}, {4, 5,
+                                                    6, 7,
+                                                    8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1, 2}, {10, 11}};
 
-    TestTensor expected{armnn::TensorShape{1,1,6,2},{0,  1,
-                                                     2,  3,
-                                                     4,  5,
-                                                     6,  7,
-                                                     8,  9,
-                                                     10, 11}};
+    TestTensor expected{armnn::TensorShape{1, 1, 6, 2}, {0, 1,
+                                                         2, 3,
+                                                         4, 5,
+                                                         6, 7,
+                                                         8, 9,
+                                                         10, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxis2_DoInterleave, COMPUTE_DEVICES)
+void SimpleConcatAxis2DoInterleave(armnn::Compute computeDevice)
 {
     int32_t axis = 2;
-    TestTensor aIn{armnn::TensorShape{1,2,2,1},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,2,3,1},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,2,1,1},{10,
-                                                11}};
+    TestTensor aIn{armnn::TensorShape{1, 2, 2, 1}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 2, 3, 1}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 2, 1, 1}, {10,
+                                                    11}};
 
-    TestTensor expected{armnn::TensorShape{1,2,6,1},{0, 1, 4, 5, 6, 10,
-                                                     2, 3, 7, 8, 9, 11}};
+    TestTensor expected{armnn::TensorShape{1, 2, 6, 1}, {0, 1, 4, 5, 6, 10,
+                                                         2, 3, 7, 8, 9, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxis3, COMPUTE_DEVICES)
+void SimpleConcatAxis3(armnn::Compute computeDevice)
 {
     int32_t axis = 3;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{1,1,1,3},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{1, 1, 1, 3}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxis3_DoInterleave, COMPUTE_DEVICES)
+void SimpleConcatAxis3DoInterleave(armnn::Compute computeDevice)
 {
     int32_t axis = 3;
-    TestTensor aIn{armnn::TensorShape{1,1,2,2},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,1,2,3},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
-                                                11}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+                                                    11}};
 
-    TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
-                                                     2, 3, 7, 8, 9, 11}};
+    TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
+                                                         2, 3, 7, 8, 9, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(AxisTooBig, COMPUTE_DEVICES)
+void AxisTooBig(armnn::Compute computeDevice)
 {
     int32_t axis = 4;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
 
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
-    TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+    TestTensor uncheckedOutput{armnn::TensorShape{1, 1, 1, 1}, {0}};
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(AxisTooSmall, COMPUTE_DEVICES)
+void AxisTooSmall(armnn::Compute computeDevice)
 {
     int32_t axis = -5;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1,1},{0}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
 
     // The axis must be within the range of [-rank(values), rank(values))
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
-    TestTensor uncheckedOutput{armnn::TensorShape{1,1,1,1},{0}};
+    TestTensor uncheckedOutput{armnn::TensorShape{1, 1, 1, 1}, {0}};
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(TooFewInputs, COMPUTE_DEVICES)
+void TooFewInputs(armnn::Compute computeDevice)
 {
     int32_t axis = 0;
-    TestTensor aIn{armnn::TensorShape{1,1,1,1},{0}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
 
     // We need at least two tensors to concatenate
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn}, axis, aIn, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn}, axis, aIn, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(MismatchedInputDimensions, COMPUTE_DEVICES)
+void MismatchedInputDimensions(armnn::Compute computeDevice)
 {
     int32_t axis = 3;
-    TestTensor aIn{armnn::TensorShape{1,1,2,2},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,1,2,3},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor mismatched{armnn::TensorShape{1,1,1,1},{10}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor mismatched{armnn::TensorShape{1, 1, 1, 1}, {10}};
 
-    TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
-                                                     2, 3, 7, 8, 9, 11}};
+    TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
+                                                         2, 3, 7, 8, 9, 11}};
 
     // The input dimensions must be compatible
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(MismatchedInputRanks, COMPUTE_DEVICES)
+void MismatchedInputRanks(armnn::Compute computeDevice)
 {
     int32_t axis = 2;
-    TestTensor aIn{armnn::TensorShape{1,1,2},{0,1}};
-    TestTensor bIn{armnn::TensorShape{1,1},{4}};
-    TestTensor expected{armnn::TensorShape{1,1,3},{0,1,4}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2}, {0, 1}};
+    TestTensor bIn{armnn::TensorShape{1, 1}, {4}};
+    TestTensor expected{armnn::TensorShape{1, 1, 3}, {0, 1, 4}};
 
     // The input dimensions must be compatible
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn, &bIn}, axis, expected, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn, &bIn}, axis, expected, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(MismatchedOutputDimensions, COMPUTE_DEVICES)
+void MismatchedOutputDimensions(armnn::Compute computeDevice)
 {
     int32_t axis = 3;
-    TestTensor aIn{armnn::TensorShape{1,1,2,2},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,1,2,3},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
-                                                11}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+                                                    11}};
 
-    TestTensor mismatched{armnn::TensorShape{1,1,6,2},{0, 1, 4, 5, 6, 10,
-                                                       2, 3, 7, 8, 9, 11}};
+    TestTensor mismatched{armnn::TensorShape{1, 1, 6, 2}, {0, 1, 4, 5, 6, 10,
+                                                           2, 3, 7, 8, 9, 11}};
 
     // The input and output dimensions must be compatible
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(MismatchedOutputRank, COMPUTE_DEVICES)
+void MismatchedOutputRank(armnn::Compute computeDevice)
 {
     int32_t axis = 3;
-    TestTensor aIn{armnn::TensorShape{1,1,2,2},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,1,2,3},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
-                                                11}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+                                                    11}};
 
-    TestTensor mismatched{armnn::TensorShape{6,2},{0, 1, 4, 5, 6, 10,
-                                                   2, 3, 7, 8, 9, 11}};
+    TestTensor mismatched{armnn::TensorShape{6, 2}, {0, 1, 4, 5, 6, 10,
+                                                     2, 3, 7, 8, 9, 11}};
 
     // The input and output ranks must match
     V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, sample, expectedParserStatus);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, computeDevice, expectedParserStatus);
 }
 
-BOOST_DATA_TEST_CASE(ValidNegativeAxis, COMPUTE_DEVICES)
+void ValidNegativeAxis(armnn::Compute computeDevice)
 {
     // this is the same as 3
     // see: https://www.tensorflow.org/api_docs/python/tf/concat
     int32_t axis = -1;
-    TestTensor aIn{armnn::TensorShape{1,1,2,2},{0,  1,
-                                                2,  3}};
-    TestTensor bIn{armnn::TensorShape{1,1,2,3},{4,  5,  6,
-                                                7,  8,  9}};
-    TestTensor cIn{armnn::TensorShape{1,1,2,1},{10,
-                                                11}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
+                                                    2, 3}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
+                                                    7, 8, 9}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
+                                                    11}};
 
-    TestTensor expected{armnn::TensorShape{1,1,2,6},{0, 1, 4, 5, 6, 10,
-                                                     2, 3, 7, 8, 9, 11}};
+    TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
+                                                         2, 3, 7, 8, 9, 11}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxisZero3D, COMPUTE_DEVICES)
+void SimpleConcatAxisZero3D(armnn::Compute computeDevice)
 {
     int32_t axis = 0;
-    TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{3,1,1},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{3, 1, 1}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxisOne3D, COMPUTE_DEVICES)
+void SimpleConcatAxisOne3D(armnn::Compute computeDevice)
 {
     int32_t axis = 1;
-    TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{1,3,1},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{1, 3, 1}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxisTwo3D, COMPUTE_DEVICES)
+void SimpleConcatAxisTwo3D(armnn::Compute computeDevice)
 {
     int32_t axis = 2;
-    TestTensor aIn{armnn::TensorShape{1,1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{1,1,3},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{1, 1, 3}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxisZero2D, COMPUTE_DEVICES)
+void SimpleConcatAxisZero2D(armnn::Compute computeDevice)
 {
     int32_t axis = 0;
-    TestTensor aIn{armnn::TensorShape{1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{3,1},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{3, 1}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxisOne2D, COMPUTE_DEVICES)
+void SimpleConcatAxisOne2D(armnn::Compute computeDevice)
 {
     int32_t axis = 1;
-    TestTensor aIn{armnn::TensorShape{1,1},{0}};
-    TestTensor bIn{armnn::TensorShape{1,1},{1}};
-    TestTensor cIn{armnn::TensorShape{1,1},{2}};
+    TestTensor aIn{armnn::TensorShape{1, 1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1, 1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1, 1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{1,3},{0,1,2}};
+    TestTensor expected{armnn::TensorShape{1, 3}, {0, 1, 2}};
 
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_DATA_TEST_CASE(SimpleConcatAxisZero1D, COMPUTE_DEVICES)
+void SimpleConcatAxisZero1D(armnn::Compute computeDevice)
 {
     int32_t axis = 0;
-    TestTensor aIn{armnn::TensorShape{1},{0}};
-    TestTensor bIn{armnn::TensorShape{1},{1}};
-    TestTensor cIn{armnn::TensorShape{1},{2}};
+    TestTensor aIn{armnn::TensorShape{1}, {0}};
+    TestTensor bIn{armnn::TensorShape{1}, {1}};
+    TestTensor cIn{armnn::TensorShape{1}, {2}};
 
-    TestTensor expected{armnn::TensorShape{3},{0,1,2}};
-
-    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, sample);
+    TestTensor expected{armnn::TensorShape{3}, {0, 1, 2}};
+    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+} // namespace <anonymous>
+
+TEST_SUITE("ConcatTests_CpuRef")
+{
+
+TEST_CASE("SimpleConcatAxis0")
+{
+    SimpleConcatAxis0(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("ConcatAxis0NoInterleave")
+{
+    ConcatAxis0NoInterleave(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxis1")
+{
+    SimpleConcatAxis1(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("ConcatAxis1NoInterleave")
+{
+    ConcatAxis1NoInterleave(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxis1DoInterleave")
+{
+    SimpleConcatAxis1DoInterleave(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxis2")
+{
+    SimpleConcatAxis2(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("ConcatAxis2NoInterleave")
+{
+    ConcatAxis2NoInterleave(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxis2DoInterleave")
+{
+    SimpleConcatAxis2DoInterleave(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxis3")
+{
+    SimpleConcatAxis3(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxis3DoInterleave")
+{
+    SimpleConcatAxis3DoInterleave(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("AxisTooBig")
+{
+    AxisTooBig(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("AxisTooSmall")
+{
+    AxisTooSmall(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("TooFewInputs")
+{
+    TooFewInputs(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("MismatchedInputDimensions")
+{
+    MismatchedInputDimensions(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("MismatchedInputRanks")
+{
+    MismatchedInputRanks(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("MismatchedOutputDimensions")
+{
+    MismatchedOutputDimensions(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("MismatchedOutputRank")
+{
+    MismatchedOutputRank(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("ValidNegativeAxis")
+{
+    ValidNegativeAxis(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxisZero3D")
+{
+    SimpleConcatAxisZero3D(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxisOne3D")
+{
+    SimpleConcatAxisOne3D(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxisTwo3D")
+{
+    SimpleConcatAxisTwo3D(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxisZero2D")
+{
+    SimpleConcatAxisZero2D(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxisOne2D")
+{
+    SimpleConcatAxisOne2D(armnn::Compute::CpuRef);
+}
+
+TEST_CASE("SimpleConcatAxisZero1D")
+{
+    SimpleConcatAxisZero1D(armnn::Compute::CpuRef);
+}
+
+}
+
+#ifdef ARMCOMPUTECL_ENABLED
+TEST_SUITE("ConcatTests_GpuAcc")
+{
+
+TEST_CASE("SimpleConcatAxis0")
+{
+    SimpleConcatAxis0(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("ConcatAxis0NoInterleave")
+{
+    ConcatAxis0NoInterleave(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxis1")
+{
+    SimpleConcatAxis1(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("ConcatAxis1NoInterleave")
+{
+    ConcatAxis1NoInterleave(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxis1DoInterleave")
+{
+    SimpleConcatAxis1DoInterleave(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxis2")
+{
+    SimpleConcatAxis2(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("ConcatAxis2NoInterleave")
+{
+    ConcatAxis2NoInterleave(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxis2DoInterleave")
+{
+    SimpleConcatAxis2DoInterleave(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxis3")
+{
+    SimpleConcatAxis3(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxis3DoInterleave")
+{
+    SimpleConcatAxis3DoInterleave(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("AxisTooBig")
+{
+    AxisTooBig(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("AxisTooSmall")
+{
+    AxisTooSmall(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("TooFewInputs")
+{
+    TooFewInputs(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("MismatchedInputDimensions")
+{
+    MismatchedInputDimensions(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("MismatchedInputRanks")
+{
+    MismatchedInputRanks(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("MismatchedOutputDimensions")
+{
+    MismatchedOutputDimensions(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("MismatchedOutputRank")
+{
+    MismatchedOutputRank(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("ValidNegativeAxis")
+{
+    ValidNegativeAxis(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxisZero3D")
+{
+    SimpleConcatAxisZero3D(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxisOne3D")
+{
+    SimpleConcatAxisOne3D(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxisTwo3D")
+{
+    SimpleConcatAxisTwo3D(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxisZero2D")
+{
+    SimpleConcatAxisZero2D(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxisOne2D")
+{
+    SimpleConcatAxisOne2D(armnn::Compute::GpuAcc);
+}
+
+TEST_CASE("SimpleConcatAxisZero1D")
+{
+    SimpleConcatAxisZero1D(armnn::Compute::GpuAcc);
+}
+
+} // End of GpuAcc Test Suite
+#endif
diff --git a/test/Concurrent.cpp b/test/Concurrent.cpp
index 50ba0e9..2ea6eb0 100644
--- a/test/Concurrent.cpp
+++ b/test/Concurrent.cpp
@@ -6,12 +6,11 @@
 
 #include "../1.0/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
-
+#include <doctest/doctest.h>
 #include <log/log.h>
 
-BOOST_AUTO_TEST_SUITE(ConcurrentDriverTests)
-
+TEST_SUITE("ConcurrentDriverTests")
+{
 using ArmnnDriver   = armnn_driver::ArmnnDriver;
 using DriverOptions = armnn_driver::DriverOptions;
 using HalPolicy     = armnn_driver::hal_1_0::HalPolicy;
@@ -26,7 +25,7 @@
 // The main point of this test is to check that multiple requests can be
 // executed without waiting for the callback from previous execution.
 // The operations performed are not significant.
-BOOST_AUTO_TEST_CASE(ConcurrentExecute)
+TEST_CASE("ConcurrentExecute")
 {
     ALOGI("ConcurrentExecute: entry");
 
@@ -64,7 +63,7 @@
         }
     }
 
-    BOOST_TEST(maxRequests == preparedModelsSize);
+    CHECK(maxRequests == preparedModelsSize);
 
     // construct the request data
     V1_0::DataLocation inloc = {};
@@ -119,9 +118,9 @@
     ALOGI("ConcurrentExecute: validating results");
     for (size_t i = 0; i < maxRequests; ++i)
     {
-        BOOST_TEST(outdata[i][0] == 152);
+        CHECK(outdata[i][0] == 152);
     }
     ALOGI("ConcurrentExecute: exit");
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/Convolution2D.hpp b/test/Convolution2D.hpp
index c3f9d48..540cdd7 100644
--- a/test/Convolution2D.hpp
+++ b/test/Convolution2D.hpp
@@ -7,13 +7,11 @@
 
 #include "DriverTestHelpers.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <log/log.h>
 
 #include <OperationsUtils.h>
 
-BOOST_AUTO_TEST_SUITE(Convolution2DTests)
-
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
@@ -25,11 +23,11 @@
 #define ARMNN_ANDROID_FP16_TEST(result, fp16Expectation, fp32Expectation, fp16Enabled) \
    if (fp16Enabled) \
    { \
-       BOOST_TEST((result == fp16Expectation || result == fp32Expectation), result << \
+       CHECK_MESSAGE((result == fp16Expectation || result == fp32Expectation), result << \
        " does not match either " << fp16Expectation << "[fp16] or " << fp32Expectation << "[fp32]"); \
    } else \
    { \
-      BOOST_TEST(result == fp32Expectation); \
+      CHECK(result == fp32Expectation); \
    }
 
 void SetModelFp16Flag(V1_0::Model& model, bool fp16Enabled);
@@ -55,22 +53,22 @@
 
     // add operands
     float weightValue[] = {1.f, -1.f, 0.f, 1.f};
-    float biasValue[]   = {0.f};
+    float biasValue[] = {0.f};
 
-    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 3, 1});
-    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
-    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
-    AddIntOperand<HalPolicy>(model, (int32_t)paddingScheme); // padding
+    AddInputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 3, 1});
+    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 2, 2, 1}, weightValue);
+    AddTensorOperand<HalPolicy>(model, hidl_vec<uint32_t>{1}, biasValue);
+    AddIntOperand<HalPolicy>(model, (int32_t)paddingScheme); // padding
     AddIntOperand<HalPolicy>(model, 2); // stride x
     AddIntOperand<HalPolicy>(model, 2); // stride y
     AddIntOperand<HalPolicy>(model, 0); // no activation
-    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
+    AddOutputOperand<HalPolicy>(model, hidl_vec<uint32_t>{1, 1, outSize, 1});
 
     // make the convolution operation
     model.operations.resize(1);
     model.operations[0].type = HalOperationType::CONV_2D;
-    model.operations[0].inputs  = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
-    model.operations[0].outputs = hidl_vec<uint32_t>{7};
+    model.operations[0].inputs = hidl_vec<uint32_t>{0, 1, 2, 3, 4, 5, 6};
+    model.operations[0].outputs = hidl_vec<uint32_t>{7};
 
     // make the prepared model
     SetModelFp16Flag(model, fp16Enabled);
@@ -78,24 +76,24 @@
 
     // construct the request
     V1_0::DataLocation inloc = {};
-    inloc.poolIndex          = 0;
-    inloc.offset             = 0;
-    inloc.length             = 6 * sizeof(float);
-    RequestArgument input    = {};
-    input.location           = inloc;
-    input.dimensions         = hidl_vec<uint32_t>{};
+    inloc.poolIndex = 0;
+    inloc.offset = 0;
+    inloc.length = 6 * sizeof(float);
+    RequestArgument input = {};
+    input.location = inloc;
+    input.dimensions = hidl_vec<uint32_t>{};
 
     V1_0::DataLocation outloc = {};
-    outloc.poolIndex          = 1;
-    outloc.offset             = 0;
-    outloc.length             = outSize * sizeof(float);
-    RequestArgument output    = {};
-    output.location           = outloc;
-    output.dimensions         = hidl_vec<uint32_t>{};
+    outloc.poolIndex = 1;
+    outloc.offset = 0;
+    outloc.length = outSize * sizeof(float);
+    RequestArgument output = {};
+    output.location = outloc;
+    output.dimensions = hidl_vec<uint32_t>{};
 
     V1_0::Request request = {};
-    request.inputs  = hidl_vec<RequestArgument>{input};
-    request.outputs = hidl_vec<RequestArgument>{output};
+    request.inputs = hidl_vec<RequestArgument>{input};
+    request.outputs = hidl_vec<RequestArgument>{output};
 
     // set the input data (matching source test)
     float indata[] = {1024.25f, 1.f, 0.f, 3.f, -1, -1024.25f};
@@ -114,19 +112,17 @@
     // check the result
     switch (paddingScheme)
     {
-    case android::nn::kPaddingValid:
-        ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
-        break;
-    case android::nn::kPaddingSame:
-        ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
-        BOOST_TEST(outdata[1] == 0.f);
-        break;
-    default:
-        BOOST_TEST(false);
-        break;
+        case android::nn::kPaddingValid:
+            ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
+            break;
+        case android::nn::kPaddingSame:
+            ARMNN_ANDROID_FP16_TEST(outdata[0], 1022.f, 1022.25f, fp16Enabled)
+            CHECK(outdata[1] == 0.f);
+            break;
+        default:
+            CHECK(false);
+            break;
     }
 }
 
 } // namespace driverTestHelpers
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/Dilation.hpp b/test/Dilation.hpp
index d0189c9..a05dba4 100644
--- a/test/Dilation.hpp
+++ b/test/Dilation.hpp
@@ -10,14 +10,11 @@
 #include <armnn/LayerVisitorBase.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <numeric>
 
-BOOST_AUTO_TEST_SUITE(DilationTests)
-
 using namespace armnn;
-using namespace boost;
 using namespace driverTestHelpers;
 
 struct DilationTestOptions
@@ -82,8 +79,8 @@
     template<typename ConvolutionDescriptor>
     void CheckDilationParams(const ConvolutionDescriptor& descriptor)
     {
-        BOOST_CHECK_EQUAL(descriptor.m_DilationX, m_ExpectedDilationX);
-        BOOST_CHECK_EQUAL(descriptor.m_DilationY, m_ExpectedDilationY);
+        CHECK_EQ(descriptor.m_DilationX, m_ExpectedDilationX);
+        CHECK_EQ(descriptor.m_DilationY, m_ExpectedDilationY);
     }
 };
 
@@ -169,11 +166,9 @@
     data.m_OutputSlotForOperand = std::vector<IOutputSlot*>(model.operands.size(), nullptr);
 
     bool ok = HalPolicy::ConvertOperation(model.operations[0], model, data);
-    BOOST_CHECK(ok);
+    CHECK(ok);
 
     // check if dilation params are as expected
     DilationTestVisitor visitor = options.m_HasDilation ? DilationTestVisitor(2, 2) : DilationTestVisitor();
     data.m_Network->Accept(visitor);
 }
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/test/DriverTestHelpers.cpp b/test/DriverTestHelpers.cpp
index 8e8d7be..44e6e72 100644
--- a/test/DriverTestHelpers.cpp
+++ b/test/DriverTestHelpers.cpp
@@ -4,7 +4,8 @@
 //
 #include "DriverTestHelpers.hpp"
 #include <log/log.h>
-#include <boost/test/unit_test.hpp>
+
+#include <doctest/doctest.h>
 
 namespace android
 {
@@ -139,10 +140,10 @@
     driver.prepareModel(model, cb);
 
     prepareStatus = cb->GetErrorStatus();
-    BOOST_TEST(prepareStatus == expectedStatus);
+    CHECK((int)prepareStatus == (int)expectedStatus);
     if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
-        BOOST_TEST((cb->GetPreparedModel() != nullptr));
+        CHECK((cb->GetPreparedModel() != nullptr));
     }
     return cb->GetPreparedModel();
 }
@@ -158,10 +159,10 @@
     driver.prepareModel_1_1(model, V1_1::ExecutionPreference::LOW_POWER, cb);
 
     prepareStatus = cb->GetErrorStatus();
-    BOOST_TEST(prepareStatus == expectedStatus);
+    CHECK((int)prepareStatus == (int)expectedStatus);
     if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
-        BOOST_TEST((cb->GetPreparedModel() != nullptr));
+        CHECK((cb->GetPreparedModel() != nullptr));
     }
     return cb->GetPreparedModel();
 }
@@ -184,10 +185,10 @@
     driver.prepareModel_1_2(model, V1_1::ExecutionPreference::LOW_POWER, emptyHandle1, emptyHandle2, emptyToken, cb);
 
     prepareStatus = cb->GetErrorStatus();
-    BOOST_TEST(prepareStatus == expectedStatus);
+    CHECK((int)prepareStatus == (int)expectedStatus);
     if (expectedStatus == V1_0::ErrorStatus::NONE)
     {
-        BOOST_TEST((cb->GetPreparedModel_1_2() != nullptr));
+        CHECK((cb->GetPreparedModel_1_2() != nullptr));
     }
     return cb->GetPreparedModel_1_2();
 }
@@ -219,7 +220,7 @@
     prepareStatus = cb->Get_1_3_ErrorStatus();
     if (prepareStatus == V1_3::ErrorStatus::NONE)
     {
-        BOOST_TEST((cb->GetPreparedModel_1_3() != nullptr));
+        CHECK((cb->GetPreparedModel_1_3() != nullptr));
     }
     return cb->GetPreparedModel_1_3();
 }
@@ -230,10 +231,10 @@
                           const V1_0::Request& request,
                           V1_0::ErrorStatus expectedStatus)
 {
-    BOOST_TEST(preparedModel.get() != nullptr);
+    CHECK(preparedModel.get() != nullptr);
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
     V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
-    BOOST_TEST(execStatus == expectedStatus);
+    CHECK((int)execStatus == (int)expectedStatus);
     ALOGI("Execute: waiting for callback to be invoked");
     cb->wait();
     return execStatus;
@@ -242,9 +243,10 @@
 android::sp<ExecutionCallback> ExecuteNoWait(android::sp<V1_0::IPreparedModel> preparedModel,
                                              const V1_0::Request& request)
 {
-    BOOST_TEST(preparedModel.get() != nullptr);
+    CHECK(preparedModel.get() != nullptr);
     android::sp<ExecutionCallback> cb(new ExecutionCallback());
-    BOOST_TEST(preparedModel->execute(request, cb) == V1_0::ErrorStatus::NONE);
+    V1_0::ErrorStatus execStatus = preparedModel->execute(request, cb);
+    CHECK((int)execStatus == (int)V1_0::ErrorStatus::NONE);
     ALOGI("ExecuteNoWait: returning callback object");
     return cb;
 }
diff --git a/test/DriverTestHelpers.hpp b/test/DriverTestHelpers.hpp
index d37fbf2..36deeab 100644
--- a/test/DriverTestHelpers.hpp
+++ b/test/DriverTestHelpers.hpp
@@ -10,10 +10,33 @@
 
 #include "../ArmnnDriver.hpp"
 #include <iosfwd>
-#include <boost/test/unit_test.hpp>
-
 #include <android/hidl/allocator/1.0/IAllocator.h>
 
+// Un-define some of the macros as they clash in 'third-party/doctest/doctest.h'
+// and 'system/core/base/include/android-base/logging.h'
+// macro redefined error[-Werror,-Wmacro-redefined]
+#ifdef CHECK
+#undef CHECK
+#endif
+#ifdef CHECK_EQ
+#undef CHECK_EQ
+#endif
+#ifdef CHECK_NE
+#undef CHECK_NE
+#endif
+#ifdef CHECK_GT
+#undef CHECK_GT
+#endif
+#ifdef CHECK_LT
+#undef CHECK_LT
+#endif
+#ifdef CHECK_GE
+#undef CHECK_GE
+#endif
+#ifdef CHECK_LE
+#undef CHECK_LE
+#endif
+
 using RequestArgument = V1_0::RequestArgument;
 using ::android::hidl::allocator::V1_0::IAllocator;
 
@@ -167,7 +190,7 @@
 
     android::sp<IAllocator> allocator = IAllocator::getService("ashmem");
     allocator->allocate(sizeof(T) * size, [&](bool success, const hidl_memory& mem) {
-        BOOST_TEST(success);
+        ARMNN_ASSERT(success);
         pool = mem;
     });
 
diff --git a/test/FullyConnected.cpp b/test/FullyConnected.cpp
index a68a587..704de44 100644
--- a/test/FullyConnected.cpp
+++ b/test/FullyConnected.cpp
@@ -6,12 +6,12 @@
 
 #include "../1.0/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <log/log.h>
 
-BOOST_AUTO_TEST_SUITE(FullyConnectedTests)
-
+TEST_SUITE("FullyConnectedTests")
+{
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
@@ -19,7 +19,7 @@
 using HalPolicy = hal_1_0::HalPolicy;
 
 // Add our own test here since we fail the fc tests which Google supplies (because of non-const weights)
-BOOST_AUTO_TEST_CASE(FullyConnected)
+TEST_CASE("FullyConnected")
 {
     // this should ideally replicate fully_connected_float.model.cpp
     // but that uses slightly weird dimensions which I don't think we need to support for now
@@ -83,10 +83,10 @@
     }
 
     // check the result
-    BOOST_TEST(outdata[0] == 152);
+    CHECK(outdata[0] == 152);
 }
 
-BOOST_AUTO_TEST_CASE(TestFullyConnected4dInput)
+TEST_CASE("TestFullyConnected4dInput")
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
@@ -165,17 +165,17 @@
     }
 
     // check the result
-    BOOST_TEST(outdata[0] == 1);
-    BOOST_TEST(outdata[1] == 2);
-    BOOST_TEST(outdata[2] == 3);
-    BOOST_TEST(outdata[3] == 4);
-    BOOST_TEST(outdata[4] == 5);
-    BOOST_TEST(outdata[5] == 6);
-    BOOST_TEST(outdata[6] == 7);
-    BOOST_TEST(outdata[7] == 8);
+    CHECK(outdata[0] == 1);
+    CHECK(outdata[1] == 2);
+    CHECK(outdata[2] == 3);
+    CHECK(outdata[3] == 4);
+    CHECK(outdata[4] == 5);
+    CHECK(outdata[5] == 6);
+    CHECK(outdata[6] == 7);
+    CHECK(outdata[7] == 8);
 }
 
-BOOST_AUTO_TEST_CASE(TestFullyConnected4dInputReshape)
+TEST_CASE("TestFullyConnected4dInputReshape")
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
@@ -254,17 +254,17 @@
     }
 
     // check the result
-    BOOST_TEST(outdata[0] == 1);
-    BOOST_TEST(outdata[1] == 2);
-    BOOST_TEST(outdata[2] == 3);
-    BOOST_TEST(outdata[3] == 4);
-    BOOST_TEST(outdata[4] == 5);
-    BOOST_TEST(outdata[5] == 6);
-    BOOST_TEST(outdata[6] == 7);
-    BOOST_TEST(outdata[7] == 8);
+    CHECK(outdata[0] == 1);
+    CHECK(outdata[1] == 2);
+    CHECK(outdata[2] == 3);
+    CHECK(outdata[3] == 4);
+    CHECK(outdata[4] == 5);
+    CHECK(outdata[5] == 6);
+    CHECK(outdata[6] == 7);
+    CHECK(outdata[7] == 8);
 }
 
-BOOST_AUTO_TEST_CASE(TestFullyConnectedWeightsAsInput)
+TEST_CASE("TestFullyConnectedWeightsAsInput")
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
@@ -366,14 +366,14 @@
     }
 
     // check the result
-    BOOST_TEST(outdata[0] == 1);
-    BOOST_TEST(outdata[1] == 2);
-    BOOST_TEST(outdata[2] == 3);
-    BOOST_TEST(outdata[3] == 4);
-    BOOST_TEST(outdata[4] == 5);
-    BOOST_TEST(outdata[5] == 6);
-    BOOST_TEST(outdata[6] == 7);
-    BOOST_TEST(outdata[7] == 8);
+    CHECK(outdata[0] == 1);
+    CHECK(outdata[1] == 2);
+    CHECK(outdata[2] == 3);
+    CHECK(outdata[3] == 4);
+    CHECK(outdata[4] == 5);
+    CHECK(outdata[5] == 6);
+    CHECK(outdata[6] == 7);
+    CHECK(outdata[7] == 8);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/GenericLayerTests.cpp b/test/GenericLayerTests.cpp
index 188c7b1..99e0c62 100644
--- a/test/GenericLayerTests.cpp
+++ b/test/GenericLayerTests.cpp
@@ -6,11 +6,12 @@
 
 #include "../1.0/HalPolicy.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <log/log.h>
 
-BOOST_AUTO_TEST_SUITE(GenericLayerTests)
+TEST_SUITE("GenericLayerTests")
+{
 
 using namespace android::hardware;
 using namespace driverTestHelpers;
@@ -18,7 +19,7 @@
 
 using HalPolicy = hal_1_0::HalPolicy;
 
-BOOST_AUTO_TEST_CASE(GetSupportedOperations)
+TEST_CASE("GetSupportedOperations")
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
@@ -52,9 +53,9 @@
     model0.operations[0].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model0, cb);
-    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
-    BOOST_TEST(supported.size() == (size_t)1);
-    BOOST_TEST(supported[0] == true);
+    CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+    CHECK(supported.size() == (size_t)1);
+    CHECK(supported[0] == true);
 
     V1_0::Model model1 = {};
 
@@ -81,8 +82,8 @@
 
     driver->getSupportedOperations(model1, cb);
 
-    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
-    BOOST_TEST(supported.empty());
+    CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
+    CHECK(supported.empty());
 
     // Test Broadcast on add/mul operators
     HalPolicy::Model model2 = {};
@@ -114,10 +115,10 @@
     model2.operations[1].outputs = hidl_vec<uint32_t>{4};
 
     driver->getSupportedOperations(model2, cb);
-    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
-    BOOST_TEST(supported.size() == (size_t)2);
-    BOOST_TEST(supported[0] == true);
-    BOOST_TEST(supported[1] == true);
+    CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+    CHECK(supported.size() == (size_t)2);
+    CHECK(supported[0] == true);
+    CHECK(supported[1] == true);
 
     V1_0::Model model3 = {};
 
@@ -143,9 +144,9 @@
     model3.operations[0].outputs = hidl_vec<uint32_t>{3, 4};
 
     driver->getSupportedOperations(model3, cb);
-    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
-    BOOST_TEST(supported.size() == (size_t)1);
-    BOOST_TEST(supported[0] == false);
+    CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+    CHECK(supported.size() == (size_t)1);
+    CHECK(supported[0] == false);
 
     HalPolicy::Model model4 = {};
 
@@ -158,14 +159,14 @@
     model4.operations[0].outputs = hidl_vec<uint32_t>{0};
 
     driver->getSupportedOperations(model4, cb);
-    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
-    BOOST_TEST(supported.empty());
+    CHECK((int)errorStatus == (int)V1_0::ErrorStatus::INVALID_ARGUMENT);
+    CHECK(supported.empty());
 }
 
 // The purpose of this test is to ensure that when encountering an unsupported operation
 // it is skipped and getSupportedOperations() continues (rather than failing and stopping).
 // As per IVGCVSW-710.
-BOOST_AUTO_TEST_CASE(UnsupportedLayerContinueOnFailure)
+TEST_CASE("UnsupportedLayerContinueOnFailure")
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
@@ -240,16 +241,16 @@
 
     // We are testing that the unsupported layers return false and the test continues rather than failing and stopping
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
-    BOOST_TEST(supported.size() == (size_t)3);
-    BOOST_TEST(supported[0] == false);
-    BOOST_TEST(supported[1] == true);
-    BOOST_TEST(supported[2] == false);
+    CHECK((int)errorStatus == (int)V1_0::ErrorStatus::NONE);
+    CHECK(supported.size() == (size_t)3);
+    CHECK(supported[0] == false);
+    CHECK(supported[1] == true);
+    CHECK(supported[2] == false);
 }
 
 // The purpose of this test is to ensure that when encountering an failure
 // during mem pool mapping we properly report an error to the framework via a callback
-BOOST_AUTO_TEST_CASE(ModelToINetworkConverterMemPoolFail)
+TEST_CASE("ModelToINetworkConverterMemPoolFail")
 {
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
@@ -268,8 +269,8 @@
 
     // Memory pool mapping should fail, we should report an error
     driver->getSupportedOperations(model, cb);
-    BOOST_TEST((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
-    BOOST_TEST(supported.empty());
+    CHECK((int)errorStatus != (int)V1_0::ErrorStatus::NONE);
+    CHECK(supported.empty());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/Lstm.hpp b/test/Lstm.hpp
index 2cb3c26..e384446 100644
--- a/test/Lstm.hpp
+++ b/test/Lstm.hpp
@@ -9,7 +9,7 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/math/special_functions/relative_difference.hpp>
+#include <doctest/doctest.h>
 
 #include <array>
 
@@ -40,26 +40,6 @@
     return inputRequestArgument;
 }
 
-// Returns true if the relative difference between two float values is less than the tolerance value given.
-// This is used because the floating point comparison tolerance (set on each BOOST_AUTO_TEST_CASE) does not work!
-bool TolerantCompareEqual(float a, float b, float tolerance = 0.00001f)
-{
-    float rd;
-    if (a == 0.0f)
-    {
-        rd = fabs(b);
-    }
-    else if (b == 0.0f)
-    {
-        rd = fabs(a);
-    }
-    else
-    {
-        rd = boost::math::relative_difference(a, b);
-    }
-    return rd < tolerance;
-}
-
 // Helper function to create an OperandLifeTime::NO_VALUE for testing.
 // To be used on optional input operands that have no values - these are valid and should be tested.
 V1_0::OperandLifeTime CreateNoValueLifeTime(const hidl_vec<uint32_t>& dimensions)
@@ -100,12 +80,6 @@
 
 } // anonymous namespace
 
-#ifndef ARMCOMPUTECL_ENABLED
-static const std::array<armnn::Compute, 1> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef }};
-#else
-static const std::array<armnn::Compute, 2> COMPUTE_DEVICES = {{ armnn::Compute::CpuRef, armnn::Compute::GpuAcc }};
-#endif
-
 // Add our own tests here since we fail the lstm tests which Google supplies (because of non-const weights)
 template <typename HalPolicy>
 void LstmTestImpl(const hidl_vec<uint32_t>&   inputDimensions,
@@ -394,18 +368,18 @@
     // check the results
     for (size_t i = 0; i < outputStateOutValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(outputStateOutValue[i], outputStateOutData[i]),
-                   "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
+        CHECK_MESSAGE(outputStateOutValue[i] == doctest::Approx( outputStateOutData[i] ),
+                      "outputStateOut[" << i << "]: " << outputStateOutValue[i] << " != " << outputStateOutData[i]);
     }
     for (size_t i = 0; i < cellStateOutValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i]),
-                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+        CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
+                      "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
     }
     for (size_t i = 0; i < outputValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i]),
-                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+        CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
+                      "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
     }
 }
 
@@ -669,13 +643,13 @@
     // check the results
     for (size_t i = 0; i < cellStateOutValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(cellStateOutValue[i], cellStateOutData[i], 1.0f),
-                   "cellStateOut[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
+        CHECK_MESSAGE(cellStateOutValue[i] == doctest::Approx( cellStateOutData[i] ),
+                      "cellStateOutValue[" << i << "]: " << cellStateOutValue[i] << " != " << cellStateOutData[i]);
     }
     for (size_t i = 0; i < outputValue.size(); ++i)
     {
-        BOOST_TEST(TolerantCompareEqual(outputValue[i], outputData[i], 1.0f),
-                   "output[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
+        CHECK_MESSAGE(outputValue[i] == doctest::Approx( outputData[i] ),
+                      "outputValue[" << i << "]: " << outputValue[i] << " != " << outputData[i]);
     }
 }
 
diff --git a/test/SystemProperties.cpp b/test/SystemProperties.cpp
index e1a2632..b1b6013 100644
--- a/test/SystemProperties.cpp
+++ b/test/SystemProperties.cpp
@@ -3,55 +3,56 @@
 // SPDX-License-Identifier: MIT
 //
 #include "DriverTestHelpers.hpp"
-#include <boost/test/unit_test.hpp>
 #include <log/log.h>
 #include "../SystemPropertiesUtils.hpp"
 
-BOOST_AUTO_TEST_SUITE(SystemProperiesTests)
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_CASE(SystemProperties)
+TEST_SUITE("SystemProperiesTests")
+{
+TEST_CASE("SystemProperties")
 {
     // Test default value
     {
         auto p = __system_property_find("thisDoesNotExist");
-        BOOST_TEST((p == nullptr));
+        CHECK((p == nullptr));
 
         int defaultValue = ParseSystemProperty("thisDoesNotExist", -4);
-        BOOST_TEST((defaultValue == -4));
+        CHECK((defaultValue == -4));
     }
 
     //  Test default value from bad data type
     {
         __system_property_set("thisIsNotFloat", "notfloat");
         float defaultValue = ParseSystemProperty("thisIsNotFloat", 0.1f);
-        BOOST_TEST((defaultValue == 0.1f));
+        CHECK((defaultValue == 0.1f));
     }
 
     // Test fetching bool values
     {
         __system_property_set("myTestBool", "1");
         bool b = ParseSystemProperty("myTestBool", false);
-        BOOST_TEST((b == true));
+        CHECK((b == true));
     }
     {
         __system_property_set("myTestBool", "0");
         bool b = ParseSystemProperty("myTestBool", true);
-        BOOST_TEST((b == false));
+        CHECK((b == false));
     }
 
     // Test fetching int
     {
         __system_property_set("myTestInt", "567");
         int i = ParseSystemProperty("myTestInt", 890);
-        BOOST_TEST((i==567));
+        CHECK((i==567));
     }
 
     // Test fetching float
     {
         __system_property_set("myTestFloat", "1.2f");
         float f = ParseSystemProperty("myTestFloat", 3.4f);
-        BOOST_TEST((f==1.2f));
+        CHECK((f==1.2f));
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/Tests.cpp b/test/Tests.cpp
index 0ef142d..a3a38b9 100644
--- a/test/Tests.cpp
+++ b/test/Tests.cpp
@@ -3,29 +3,33 @@
 // SPDX-License-Identifier: MIT
 //
 #define LOG_TAG "ArmnnDriverTests"
-#define BOOST_TEST_MODULE armnn_driver_tests
-#include <boost/test/unit_test.hpp>
 #include <log/log.h>
 
 #include "DriverTestHelpers.hpp"
 
-BOOST_AUTO_TEST_SUITE(DriverTests)
+#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#endif
+#include <doctest/doctest.h>
 
 using namespace android::hardware;
 using namespace driverTestHelpers;
 using namespace armnn_driver;
 
-BOOST_AUTO_TEST_CASE(Init)
+TEST_SUITE("DriverTests")
+{
+
+TEST_CASE("Init")
 {
     // Making the driver object on the stack causes a weird libc error, so make it on the heap instead
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
 
     V1_0::DeviceStatus status = driver->getStatus();
-    // Note double-parentheses to avoid compile error from Boost trying to printf the DeviceStatus
-    BOOST_TEST((status == V1_0::DeviceStatus::AVAILABLE));
+    // Note double-parentheses to avoid compile error from doctest trying to stringify the DeviceStatus
+    CHECK((status == V1_0::DeviceStatus::AVAILABLE));
 }
 
-BOOST_AUTO_TEST_CASE(TestCapabilities)
+TEST_CASE("TestCapabilities")
 {
     // Making the driver object on the stack causes a weird libc error, so make it on the heap instead
     auto driver = std::make_unique<ArmnnDriver>(DriverOptions(armnn::Compute::CpuRef));
@@ -41,11 +45,11 @@
 
     driver->getCapabilities(cb);
 
-    BOOST_TEST((int)error == (int)V1_0::ErrorStatus::NONE);
-    BOOST_TEST(cap.float32Performance.execTime > 0.f);
-    BOOST_TEST(cap.float32Performance.powerUsage > 0.f);
-    BOOST_TEST(cap.quantized8Performance.execTime > 0.f);
-    BOOST_TEST(cap.quantized8Performance.powerUsage > 0.f);
+    CHECK((int)error == (int)V1_0::ErrorStatus::NONE);
+    CHECK(cap.float32Performance.execTime > 0.f);
+    CHECK(cap.float32Performance.powerUsage > 0.f);
+    CHECK(cap.quantized8Performance.execTime > 0.f);
+    CHECK(cap.quantized8Performance.powerUsage > 0.f);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/test/UtilsTests.cpp b/test/UtilsTests.cpp
index de84bb4..c9f6aad 100644
--- a/test/UtilsTests.cpp
+++ b/test/UtilsTests.cpp
@@ -4,7 +4,6 @@
 //
 
 #include "DriverTestHelpers.hpp"
-#include <boost/test/unit_test.hpp>
 #include <log/log.h>
 
 #include "../Utils.hpp"
@@ -18,6 +17,7 @@
 
 #include <Filesystem.hpp>
 
+#include <doctest/doctest.h>
 
 using namespace android;
 using namespace android::nn;
@@ -64,7 +64,6 @@
 
 } // armnn namespace
 
-BOOST_AUTO_TEST_SUITE(UtilsTests)
 
 // The following are helpers for writing unit tests for the driver.
 namespace
@@ -78,10 +77,9 @@
     ExportNetworkGraphFixture()
         : ExportNetworkGraphFixture("/data")
     {}
+
     ExportNetworkGraphFixture(const std::string& requestInputsAndOutputsDumpDir)
-        : m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
-        , m_FileName()
-        , m_FileStream()
+        : m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir), m_FileName(), m_FileStream()
     {
         // Set the name of the output .dot file.
         // NOTE: the export now uses a time stamp to name the file so we
@@ -97,7 +95,7 @@
         m_FileStream.close();
 
         // Ignore any error (such as file not found).
-        (void)remove(m_FileName.c_str());
+        (void) remove(m_FileName.c_str());
     }
 
     bool FileExists()
@@ -147,10 +145,12 @@
 };
 
 
-
 } // namespace
 
-BOOST_AUTO_TEST_CASE(ExportToEmptyDirectory)
+TEST_SUITE("UtilsTests")
+{
+
+TEST_CASE("ExportToEmptyDirectory")
 {
     // Set the fixture for this test.
     ExportNetworkGraphFixture fixture("");
@@ -167,13 +167,13 @@
 
     // Export the mock optimized network.
     fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
-                                              fixture.m_RequestInputsAndOutputsDumpDir);
+                                                                   fixture.m_RequestInputsAndOutputsDumpDir);
 
     // Check that the output file does not exist.
-    BOOST_TEST(!fixture.FileExists());
+    CHECK(!fixture.FileExists());
 }
 
-BOOST_AUTO_TEST_CASE(ExportNetwork)
+TEST_CASE("ExportNetwork")
 {
     // Set the fixture for this test.
     ExportNetworkGraphFixture fixture;
@@ -191,16 +191,16 @@
 
     // Export the mock optimized network.
     fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
-                                              fixture.m_RequestInputsAndOutputsDumpDir);
+                                                                   fixture.m_RequestInputsAndOutputsDumpDir);
 
     // Check that the output file exists and that it has the correct name.
-    BOOST_TEST(fixture.FileExists());
+    CHECK(fixture.FileExists());
 
     // Check that the content of the output file matches the mock content.
-    BOOST_TEST(fixture.GetFileContent() == mockSerializedContent);
+    CHECK(fixture.GetFileContent() == mockSerializedContent);
 }
 
-BOOST_AUTO_TEST_CASE(ExportNetworkOverwriteFile)
+TEST_CASE("ExportNetworkOverwriteFile")
 {
     // Set the fixture for this test.
     ExportNetworkGraphFixture fixture;
@@ -217,13 +217,13 @@
 
     // Export the mock optimized network.
     fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
-                                              fixture.m_RequestInputsAndOutputsDumpDir);
+                                                                   fixture.m_RequestInputsAndOutputsDumpDir);
 
     // Check that the output file exists and that it has the correct name.
-    BOOST_TEST(fixture.FileExists());
+    CHECK(fixture.FileExists());
 
     // Check that the content of the output file matches the mock content.
-    BOOST_TEST(fixture.GetFileContent() == mockSerializedContent);
+    CHECK(fixture.GetFileContent() == mockSerializedContent);
 
     // Update the mock serialized content of the network.
     mockSerializedContent = "This is ANOTHER mock serialized content!";
@@ -235,16 +235,16 @@
 
     // Export the mock optimized network.
     fixture.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork2,
-                                              fixture.m_RequestInputsAndOutputsDumpDir);
+                                                                   fixture.m_RequestInputsAndOutputsDumpDir);
 
     // Check that the output file still exists and that it has the correct name.
-    BOOST_TEST(fixture.FileExists());
+    CHECK(fixture.FileExists());
 
     // Check that the content of the output file matches the mock content.
-    BOOST_TEST(fixture.GetFileContent() == mockSerializedContent);
+    CHECK(fixture.GetFileContent() == mockSerializedContent);
 }
 
-BOOST_AUTO_TEST_CASE(ExportMultipleNetworks)
+TEST_CASE("ExportMultipleNetworks")
 {
     // Set the fixtures for this test.
     ExportNetworkGraphFixture fixture1;
@@ -263,32 +263,32 @@
 
     // Export the mock optimized network.
     fixture1.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
-                                              fixture1.m_RequestInputsAndOutputsDumpDir);
+                                                                    fixture1.m_RequestInputsAndOutputsDumpDir);
 
     // Check that the output file exists and that it has the correct name.
-    BOOST_TEST(fixture1.FileExists());
+    CHECK(fixture1.FileExists());
 
     // Check that the content of the output file matches the mock content.
-    BOOST_TEST(fixture1.GetFileContent() == mockSerializedContent);
+    CHECK(fixture1.GetFileContent() == mockSerializedContent);
 
     // Export the mock optimized network.
     fixture2.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
-                                              fixture2.m_RequestInputsAndOutputsDumpDir);
+                                                                    fixture2.m_RequestInputsAndOutputsDumpDir);
 
     // Check that the output file exists and that it has the correct name.
-    BOOST_TEST(fixture2.FileExists());
+    CHECK(fixture2.FileExists());
 
     // Check that the content of the output file matches the mock content.
-    BOOST_TEST(fixture2.GetFileContent() == mockSerializedContent);
+    CHECK(fixture2.GetFileContent() == mockSerializedContent);
 
     // Export the mock optimized network.
     fixture3.m_FileName = armnn_driver::ExportNetworkGraphToDotFile(mockOptimizedNetwork,
-                                              fixture3.m_RequestInputsAndOutputsDumpDir);
+                                                                    fixture3.m_RequestInputsAndOutputsDumpDir);
     // Check that the output file exists and that it has the correct name.
-    BOOST_TEST(fixture3.FileExists());
+    CHECK(fixture3.FileExists());
 
     // Check that the content of the output file matches the mock content.
-    BOOST_TEST(fixture3.GetFileContent() == mockSerializedContent);
+    CHECK(fixture3.GetFileContent() == mockSerializedContent);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}